diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index e0859c0d..2a1c8deb 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -4,7 +4,7 @@ about: Suggest how we can improve EAGO --- -Thanks for your interest in EAGO! You can use this Github issue to suggest new +Thanks for your interest in EAGO! You can use this GitHub issue to suggest new features or make recommendations on additional documentation for EAGO. **If your feature request is related to a problem, please describe the problem.** diff --git a/LICENSE.md b/LICENSE.md index b4d8fc3d..30b86887 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,7 +1,21 @@ -> Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -> -> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -> -> The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -> -> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +MIT License + +Copyright (c) 2023 Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, Matthew Stuber, and the University of Connecticut (UConn) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/NEWS.md b/NEWS.md new file mode 100644 index 00000000..e21147a1 --- /dev/null +++ b/NEWS.md @@ -0,0 +1,154 @@ +# News for EAGO Releases + +## [v0.8.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.8.1) (June 15, 2023) + +- Resolved an issue where integer and binary variables would sometimes throw a `MathOptInterface.UpperBoundAlreadySet` error. +- Added the function `unbounded_check!` which warns users if they are missing variable bounds and sets them to +/- 1E10 by default. 
+ - Added an EAGO parameter `unbounded_check` which defaults to `true` and enables `unbounded_check!`. +- Bumped requirement for PrettyTables.jl to v2+ to accommodate the latest version of DataFrames.jl. + +## [v0.8.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.8.0) (June 12, 2023) + +- Updated EAGO for compatibility with the nonlinear expression API changes introduced in JuMP v1.2: https://discourse.julialang.org/t/ann-upcoming-refactoring-of-jumps-nonlinear-api/83052. + - EAGO now uses the `MOI.Nonlinear` submodule instead of `JuMP._Derivatives`. + - Models, nodes, expressions, constraints, and operators are now compatible with MOI. +- Added logic and comparison operators to `EAGO.OperatorRegistry`. + +## [v0.7.3](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.7.3) (April 11, 2023) + +- Bumped DocStringExtensions.jl compatibility. + +## [v0.7.2](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.7.2) (November 22, 2022) + +- Added support for Julia 1.7. +- Bumped NaNMath.jl compatibility. +- Added `help?` information for various functions and structures. +- Updated documentation and some formatting. + +## [v0.7.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.7.1) (June 26, 2022) + +- Added the function `print_problem_summary`, an internal script used to display all constraints, objectives in a linear program which is added to functions for debug purposes while writing code. +- Adjusted default `EAGOParameters`. + - `branch_cvx_factor`: 0.5 => 0.25 + - `branch_offset`: 0.2 => 0.15 + - `time_limit` and `_time_left`: 1000.0 => 3600.0 + - `obbt_depth`: 0 => 6 + - `obbt_repetitions`: 1 => 3 + - `cut_tolerance_rel`: 1E-2 => 1E-3 +- Adjusted `Ipopt.Optimizer` attributes. + - `max_iter`: 20000 => 10000 + - `acceptable_iter`: 10000 => 1000 +- Excluded `test_quadratic_nonconvex_constraint_basic` from MOI tests. +- Restricted JuMP compatibility to 1.0.0 - 1.1.1. + +## [v0.7.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.7.0) (March 28, 2022) + +- Added envelopes of activation functions: `xabsx`, `logcosh` +- Added `estimator_extrema`, `estimator_under`, and `estimator_over` functions for McCormick relaxations. +- Moved various functions and related structures to new files. +- Added `RelaxCache` structure to hold relaxed problem information. +- Updated forward and reverse propagation. +- Added PrettyTables.jl. +- Added test examples. +- Added a memory allocation analysis. +- Updated documentation. + +## [v0.6.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.6.1) (March 4, 2021) + +- Minor update to tests. + +## [v0.6.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.6.0) (February 19, 2021) + +- License changed from CC BY-NC-SA 4.0 to MIT. +- Fix deprecated Ipopt constructor. +- Fix discrepancy between the returned objective value and the objective evaluated at the solution. +- Dramatically decrease allocates and first-run performance of SIP routines. +- Add two algorithms which modify `SIPRes` detailed in Djelassi, H. and Mitsos A. 2017. +- Fix objective interval fallback function. +- New SIP interface with extendable subroutines. +- Fix x^y relaxation bug. +- Add issues template. +- Add SIP subroutine documentation. + +## [v0.5.2](https://github.com/PSORLab/EAGO.jl/commit/bc59c5a8a5e26960c159e06e7b26e2e5c2472956) (November 18, 2020) + +- Fix user specified branching variables. 
+ +## [v0.5.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.5.1) (November 18, 2020) + +- Support for Julia ~1 (with limited functionality for Julia 1.0 and 1.1). + +## [v0.5.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.5.0) (November 18, 2020) + +- Introduces the `register_eago_operators!(m::JuMP.Model)` which can be used to register all nonstandard nonlinear terms used in EAGO in any JuMP model. +- Introduces `positive`, `negative`, `lower_bnd`, `upper_bnd`, and `bnd` functions which can be used to enforce bounds on intermediate terms in nonlinear expressions (`EAGO.Optimizer` only). +- Adds envelopes: `abs2`, `sinpi`, `cospi`, `fma`, `cbrt` +- Adds envelopes and functions: `xlogx` +- Adds envelopes of special functions: `erf`, `erfc`, `erfinv`, `erfcinv` +- Adds envelopes of activation functions: `relu`, `gelu`, `elu`, `selu`, `swish`, `sigmoid`, `softsign`, `softplus`, `bisigmoid`, `pentanh`, `leaky_relu`, `param_relu` +- Error messages in `sip_explicit` have been made more transparent. +- Fixes some issues with documentation image rendering and links. +- Drops appveyor CI and Travis CI in favor of GitHub Actions. + +## [v0.4.2](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.4.2) (August 28, 2020) + +- Support for Julia 1.5. + +## [v0.4.1](https://github.com/PSORLab/EAGO.jl/commit/9c1bcf024a19840a0ac49c8c6da13619a5f3845f#comments) (June 17, 2020) + +- Minor bug fixes. + +## [v0.4.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.4.0) (June 12, 2020) + +- Support for new MOI/JuMP `RawParameter` input and a number of new attributes. +- Separates McCormick and ReverseMcCormick libraries (now [McCormick.jl](https://github.com/PSORLab/McCormick.jl) and [ReverseMcCormick.jl](https://github.com/PSORLab/ReverseMcCormick.jl)) from main package. McCormick.jl is reexported. +- Relaxation calculations now return NaN values on a domain violation. +- Tolerance based validation of cuts has been added to generate numerically safe cuts. +- Significantly simplify internal codebase for `EAGO.Optimizer` (no changes to API): fully decouples input problem specifications from the formulation used internally, stack only stores variables that are branched on, and a number of internal rearrangements to clearly delineate different routines. +- Add problem classification preprocessing that throws to simpler routines if LP problem types are detected (enables future support for SOCP, MILP, MISOCP, and Convex forms). +- Fix multiple bugs and add more transparent error codes. + +## [v0.3.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.3.1) (January 29, 2020) + +- Add unit tests. +- Support for Julia 1.3. +- Fix IntervalContractors.jl dependency issue. + +## [v0.3.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.3.0) (November 5, 2019) + +This update is intended to be the last to create a large number of breaking changes to the EAGO API. Please review the use cases provided in the documentation to update examples. +- A number of performance improvements have been made to the underlying McCormick relaxation library. +- The optimizer used to construct relaxations is now modified in place. +- All subproblem storage has been moved to the `Optimizer` object and storage types (e.g. `LowerInfo`) have been removed. +- A `BinaryMinMaxHeap` structure is now used to store nodes. +- Speed and aesthetics for logging and printing utilities have been updated. 
+- Subroutines are now customized by creating a subtype of `ExtensionType` and defining subroutines which dispatch on this new structure. +- Parametric interval methods and the Implicit optimizer have been move to a separate package (to be tagged shortly). +- JIT compilation time has been reduced substantially. +- Support for silent tag and time limits. + +## [v0.2.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.2.1) (July 7, 2019) + +- Bug fix for explicit SIP solving routine that occurred for uncertainty sets of dimension greater than 1. +- Bug fix for `MOI.MAX_SENSE` (max objective sense). + +## [v0.2.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.2.0) (June 14, 2019) + +This update creates a number of breaking changes to the EAGO API. Please review the use cases provided in the documentation to update examples. +- Updated to support Julia 1.0+, MathOptInterface (MOI), and MOI construction of subproblems. +- Additional domain reduction routines available. +- Support for specialized handling of linear and quadratic terms. +- Significant performance improvements due to pre-allocation of Wengert tapes and MOI support. +- A more intuitive API for McCormick relaxation construction. + +## [v0.1.2](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.1.2) (June 20, 2018) + +- Significant speed and functionality updates. + +## [v0.1.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.1.1) (June 7, 2018) + +- Initial release of combined EAGO packages. + +## [v0.1.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.1.0) (April 10, 2018) + +- Main global solver release. diff --git a/News.md b/News.md deleted file mode 100644 index 5dfa821e..00000000 --- a/News.md +++ /dev/null @@ -1,71 +0,0 @@ -# News for EAGO releases - -## v0.1.1 -- 4/12/2018: Initial release of combined EAGO packages v0.1.1. - -## v0.1.2 -- 6/20/2018: [**EAGO v0.1.2 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.1.2). Significant speed and functionality updates. - -## v0.2.0 -- 6/14/2019: [**EAGO v0.2.0 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.2.0). This update creates a number of breaking changes to the EAGO API. Please review the use cases provided in the documentation to update examples. - - Updated to support Julia 1.0+, MathOptInterface (MOI), and MOI construction of subproblems. - - Additional domain reduction routines available. - - Support for specialized handling of linear and quadratic terms. - - Significant performance improvements due to pre-allocation of Wengert tapes and MOI support. - - A more intuitive API for McCormick relaxation construction. - -## v0.2.1 -- 7/5/2019: [**EAGO v0.2.1 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.2.1). This contains fixes for a few minor issues. - - Bug fix for explicit SIP solving routine that occurred for uncertainty sets of dimension greater than 1. - - Bug fix for Max objective sense. - -## v0.3.0 - - 11/1/2019: [**EAGO v0.3.0 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.3.0): This update is intended to be the last to create a large number of breaking changes to the EAGO API. Please review the use cases provided in the documentation to update examples. - - A number of performance improvements have been made to the underlying McCormick relaxation library. - - The optimizer used to construct relaxations is now modified in place. 
- - All subproblem storage has been moved to the Optimizer object and storage types (e.g. LowerInfo) have been removed. - - A MinMax heap structure is now used to store nodes. - - Speed and aesthetics for logging and printing utilities have been updated. - - Subroutines are now customized by creating a subtype of 'ExtensionType' and defining subroutines which dispatch on this new structure. - - Parametric interval methods and the Implicit optimizer have been move to a separate package (to be tagged shortly.) - - JIT compilation time has been reduced substantially. - - Support for silent tag and time limits. - -## v0.4.0 - - 6/7/2020: [**EAGO v0.4.0 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.4.0). - - Support for new MOI/JuMP `RawParameter` input and a number of new attributes. - - Separates McCormick and ReverseMcCormick libraries (now [McCormick.jl](https://github.com/PSORLab/McCormick.jl) and [ReverseMcCormick.jl](https://github.com/PSORLab/ReverseMcCormick.jl)) - from main package. McCormick.jl is reexported. - - Relaxation calculations now return NaN values on a domain violation. - - Tolerance based validation of cuts has been added to generate numerically safe cuts. - - Significantly simplify internal codebase for `EAGO.Optimizer` (no changes to API): fully decouples input problem specifications from the formulation used internally, stack only stores variables that are branched on, and a number of internal rearrangements to clearly delineate different routines. - - Add problem classification preprocessing that throws to simpler routines if LP problem types are detected (enables future support for SOCP, MILP, MISOCP, and Convex forms). - - Fix multiple bugs and add more transparent error codes. - - 06/17/2020: [**EAGO v0.4.1 has been tagged**](https://github.com/PSORLab/EAGO.jl/commit/9c1bcf024a19840a0ac49c8c6da13619a5f3845f#comments) Contains minor bug releases. - - 08/29/2020: [**EAGO v0.4.2 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.4.2) Support for Julia v1.5. - -## v0.5.0 -- 11/18/2020: [**EAGO v0.5.0 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.5.0) - - Introduces the `register_eago_operators!(m::JuMP.Model)` which can be used - to register all nonstandard nonlinear terms used in EAGO in any JuMP model. - - Introduces `positive`, `negative`, `lower_bnd`, `upper_bnd`, and `bnd` - functions which can be used to enforce bounds on intermediate terms in - nonlinear expressions (EAGO.Optimizer only). - - Adds envelopes: `abs2`, `sinpi`, `cospi`, `fma`, `cbrt`. - - Adds envelopes and functions: `xlogx` - - Adds envelopes of special functions: `erf`, `erfc`, `erfinv`, `erfcinv` - - Adds envelopes of activation functions: `relu`, `gelu`, `elu`, `selu`, `swish1`,`sigmoid`, `softsign`, `softplus`,`bisigmoid`, `pentanh`, `leaky_relu`, `param_relu`. - - Error messages in `sip_explicit` have been made more transparent. - - Fixes some issues with documentation image rendering and links. - - Drops appveyor CI and Travis CI in favor of Github Actions. -- 11/18/2020 [**EAGO v0.5.1 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.5.1) - - Support for Julia ~1 (with limited functionality for Julia 1.0, 1.1). -- 11/18/2020 [**EAGO v0.5.2 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.5.2) - - Fix user specified branching variables. 
- -## v0.8.0 -- 6/12/2023: [**EAGO v0.8.0 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.8.0). - - Updated EAGO for compatibility with the nonlinear expression API changes introduced in JuMP v1.2: https://discourse.julialang.org/t/ann-upcoming-refactoring-of-jumps-nonlinear-api/83052 - - EAGO now uses the `MOI.Nonlinear` submodule instead of `JuMP._Derivatives`. - - Models, nodes, expressions, constraints, and operators are now compatible with MOI. - - Added logic and comparison operators to `EAGO.OperatorRegistry`. \ No newline at end of file diff --git a/Project.toml b/Project.toml index 8ec063b6..77730958 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "EAGO" uuid = "bb8be931-2a91-5aca-9f87-79e1cb69959a" -authors = ["Matthew Wilhelm "] +authors = ["Matthew Wilhelm , Robert Gottlieb , Dimitri Alston , and Matthew Stuber "] version = "0.8.1" [deps] @@ -36,7 +36,7 @@ ForwardDiff = "~0.10" IntervalArithmetic = "~0.20" IntervalContractors = "~0.4" Ipopt = "~1" -JuMP = "1.11" +JuMP = "1.12" MINLPTests = "0.5.2" MathOptInterface = "~1" McCormick = "0.13" diff --git a/README.md b/README.md index 43779b5e..e82e39e8 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ - + + +# EAGO - Easy Advanced Global Optimization -# EAGO: Easy-Advanced Global Optimization EAGO is an open-source development environment for **robust and global optimization** in Julia. | **Documentation** | **Linux/OS/Windows** | **Persistent DOI** | @@ -20,9 +21,9 @@ EAGO is a deterministic global optimizer designed to address a wide variety of o ## EAGO's Relaxations -For each nonlinear term, EAGO makes use of factorable representations to construct bounds and relaxations. In the case of `f(x) = x(x-5)sin(x)`, a list is generated and rules for constructing McCormick relaxations are used to formulate relaxations in the original `X` decision space1: +For each nonlinear term, EAGO makes use of factorable representations to construct bounds and relaxations. In the case of `f(x) = x(x-5)sin(x)`, a list is generated and rules for constructing McCormick relaxations are used to formulate relaxations in the original decision space, *X*1: -- *v*1 = x +- *v*1 = *x* - *v*2 = *v*1 - 5 - *v*3 = sin(*v*1) - *v*4 = *v*1*v*2 @@ -96,47 +97,58 @@ As a global optimization platform, EAGO's solvers can be used to find solutions ## Package Capabilities -The EAGO package has numerous features: a solver accessible from JuMP/MathOptInterface (MOI), domain reduction routines, McCormick relaxations, and specialized non-convex semi-infinite program solvers. A full description of all EAGO features is available on the [**documentation website**](https://psorlab.github.io/EAGO.jl/dev/). A series of example have been provided in the form of Jupyter notebooks in the separate [**EAGO-notebooks**](https://github.com/PSORLab/EAGO-notebooks) repository. +The EAGO package has numerous features: a solver accessible from JuMP/MathOptInterface (MOI), domain reduction routines, McCormick relaxations, and specialized nonconvex semi-infinite program solvers. A full description of all EAGO features is available on the [documentation website](https://psorlab.github.io/EAGO.jl/dev/). A series of example have been provided in the form of Jupyter Notebooks in the separate [EAGO-notebooks](https://github.com/PSORLab/EAGO-notebooks) repository. ## Recent News -- 6/15/2023: [**EAGO v0.8.1 has been tagged**](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.8.1). 
- - Resolved an issue where integer and binary variables would sometimes throw an `MathOptInterface.UpperBoundAlreadySet` error. - - Added the function `unbounded_check!` which warns users if they are missing variable bounds and sets them to +/- 1E10 by default. - - Added an EAGO parameter `unbounded_check` which defaults to `true` and enables `unbounded_check!`. - - Bumped requirement for PrettyTables.jl to v2+ to accomodate the latest version of DataFrames.jl. -For a full list of EAGO release news, click [**here**](https://github.com/PSORLab/EAGO.jl/releases). +### [v0.8.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.8.1) (June 15, 2023) + +- Resolved an issue where integer and binary variables would sometimes throw a `MathOptInterface.UpperBoundAlreadySet` error. +- Added the function `unbounded_check!` which warns users if they are missing variable bounds and sets them to +/- 1E10 by default. + - Added an EAGO parameter `unbounded_check` which defaults to `true` and enables `unbounded_check!`. +- Bumped requirement for PrettyTables.jl to v2+ to accommodate the latest version of DataFrames.jl. + +### [v0.8.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.8.0) (June 12, 2023) + +- Updated EAGO for compatibility with the nonlinear expression API changes introduced in JuMP v1.2: https://discourse.julialang.org/t/ann-upcoming-refactoring-of-jumps-nonlinear-api/83052. + - EAGO now uses the `MOI.Nonlinear` submodule instead of `JuMP._Derivatives`. + - Models, nodes, expressions, constraints, and operators are now compatible with MOI. +- Added logic and comparison operators to `EAGO.OperatorRegistry`. + +For a full list of EAGO release news, click [here](https://psorlab.github.io/EAGO.jl/stable/news/). ## Installing EAGO EAGO is a registered Julia package and it can be installed using the Julia package manager. -From the Julia REPL, type `]` to enter the Package manager (Pkg) mode and run the following command +From the Julia REPL, type `]` to enter the Package manager (Pkg) mode and run the following command: -```julia +```jldoctest pkg> add EAGO ``` -Currently, EAGO is tied to version 1.11 of JuMP. This allows a replication of some of the internal features shared by EAGO and JuMP's AD scheme, e.g., generation of Wergert Tapes, passing evaluators between JuMP and EAGO, etc. +Currently, EAGO is compatible with version 1.12+ of JuMP. This allows a replication of some of the internal features shared by EAGO and JuMP's AD scheme, e.g., generation of Wengert Tapes, passing evaluators between JuMP and EAGO, etc. -```julia +```jldoctest pkg> add JuMP ``` -EAGO v0.8.0 is the current tagged version and requires Julia 1.6+ for full functionality (however Julia 1.0+ versions support partial functionality). Use with version 1.8 is recommended as the majority of in-house testing has occurred using this version of Julia. The user is directed to the [**High-Performance Configuration**](https://psorlab.github.io/EAGO.jl/Optimizer/high_performance/) for instructions on how to install a high performance version of EAGO (rather than the basic entirely open-source version). -If any issues are encountered when loading EAGO (or when using it), please submit an issue using the Github [**issue tracker**](https://github.com/PSORLab/EAGO.jl/issues). +EAGO v0.8.1 is the current tagged version and requires Julia 1.6+ for full functionality (however Julia 1.0+ versions support partial functionality). 
Use with version 1.8 is recommended as the majority of in-house testing has occurred using this version of Julia. The user is directed to the [High-Performance Configuration](https://psorlab.github.io/EAGO.jl/optimizer/high_performance/) for instructions on how to install a high performance version of EAGO (rather than the basic entirely open-source version). +If any issues are encountered when loading EAGO (or when using it), please submit an issue using the GitHub [issue tracker](https://github.com/PSORLab/EAGO.jl/issues). ## Bug Reporting, Support, and Feature Requests -Please report bugs or feature requests by opening an issue using the Github [**issue tracker**](https://github.com/PSORLab/EAGO.jl/issues). All manners of feedback are encouraged. +Please report bugs or feature requests by opening an issue using the GitHub [issue tracker](https://github.com/PSORLab/EAGO.jl/issues). All manners of feedback are encouraged. ## Current Limitations + - Nonlinear handling assumes that box-constraints of nonlinear terms are available or can be inferred from bounds-tightening. - Only currently supports continuous functions. Support for mixed-integer problems is forthcoming. ## Work in Progress + - Extensions for nonconvex dynamic global & robust optimization. - Provide support for mixed-integer problems. -- Update EAGO to support nonsmooth problems (requires: a nonsmooth local nlp optimizer or lexiographic AD, support for relaxations is already included). +- Update EAGO to support nonsmooth problems (requires: a nonsmooth local nlp optimizer or lexicographic AD, support for relaxations is already included). - Performance assessment of nonlinear (differentiable) relaxations and incorporation into main EAGO routine. - Evaluation and incorporation of implicit relaxation routines in basic solver. @@ -144,18 +156,20 @@ Please report bugs or feature requests by opening an issue using the Github [**i Please cite the following paper when using EAGO. In plain text form this is: ``` - M. E. Wilhelm & M. D. Stuber (2020) EAGO.jl: easy advanced global optimization in Julia, - Optimization Methods and Software, DOI: 10.1080/10556788.2020.1786566 +M. E. Wilhelm & M. D. Stuber (2022) EAGO.jl: easy advanced global optimization in Julia, +Optimization Methods and Software, 37:2, 425-450, DOI: 10.1080/10556788.2020.1786566 ``` -A corresponding bibtex entry text is given below and a corresponding .bib file is given in citation.bib. -``` +A BibTeX entry is given below and a corresponding .bib file is given in citation.bib. +```bibtex @article{doi:10.1080/10556788.2020.1786566, -author = { M. E. Wilhelm and M. D. Stuber }, +author = {M. E. Wilhelm and M. D. Stuber}, title = {EAGO.jl: easy advanced global optimization in Julia}, journal = {Optimization Methods and Software}, -pages = {1-26}, -year = {2020}, +volume = {37}, +number = {2}, +pages = {425-450}, +year = {2022}, publisher = {Taylor & Francis}, doi = {10.1080/10556788.2020.1786566}, URL = {https://doi.org/10.1080/10556788.2020.1786566}, @@ -165,12 +179,13 @@ eprint = {https://doi.org/10.1080/10556788.2020.1786566} ## Related Packages -- [**ValidatedNumerics.jl**](https://github.com/JuliaIntervals/ValidatedNumerics.jl):A Julia library for validated interval calculations, including basic interval extensions, constraint programming, and interval contactors -- [**MAiNGO**](http://swmath.org/software/27878): An open-source mixed-integer nonlinear programming package in C++ that utilizes MC++ for relaxations. 
-- [**MC++**](https://omega-icl.github.io/mcpp/): A mature McCormick relaxation package in C++ that also includes McCormick-Taylor, Chebyshev +- [ValidatedNumerics.jl](https://github.com/JuliaIntervals/ValidatedNumerics.jl): A Julia library for validated interval calculations, including basic interval extensions, constraint programming, and interval contractors +- [MAiNGO](http://swmath.org/software/27878): An open-source mixed-integer nonlinear programming package in C++ that utilizes MC++ for relaxations. +- [MC++](https://omega-icl.github.io/mcpp/): A mature McCormick relaxation package in C++ that also includes McCormick-Taylor, Chebyshev Polyhedral and Ellipsoidal arithmetics. ## References + 1. A. Mitsos, B. Chachuat, and P. I. Barton. **McCormick-based relaxations of algorithms.** *SIAM Journal on Optimization*, 20(2):573–601, 2009. 2. K.A. Khan, HAJ Watson, P.I. Barton. **Differentiable McCormick relaxations.** *Journal of Global Optimization*, 67(4):687-729 (2017). 3. Stuber, M.D., Scott, J.K., Barton, P.I.: **Convex and concave relaxations of implicit functions.** *Optim. Methods Softw.* 30(3), 424–460 (2015) diff --git a/citation.bib b/citation.bib index 425e5f81..24275398 100644 --- a/citation.bib +++ b/citation.bib @@ -6,23 +6,29 @@ @article{doi:10.1080/10556788.2020.1786566, -author = { M. E. Wilhelm and M. D. Stuber }, +author = {M. E. Wilhelm and M. D. Stuber}, title = {EAGO.jl: easy advanced global optimization in Julia}, journal = {Optimization Methods and Software}, -volume = {0}, -number = {0}, -pages = {1-26}, -year = {2020}, +volume = {37}, +number = {2}, +pages = {425-450}, +year = {2022}, publisher = {Taylor & Francis}, doi = {10.1080/10556788.2020.1786566}, URL = { + https://doi.org/10.1080/10556788.2020.1786566 + + }, eprint = { + https://doi.org/10.1080/10556788.2020.1786566 + + } } diff --git a/code_analysis/create_precompile_statements.jl b/code_analysis/create_precompile_statements.jl index 679c56a3..21ed18c1 100644 --- a/code_analysis/create_precompile_statements.jl +++ b/code_analysis/create_precompile_statements.jl @@ -1,41 +1,42 @@ -# Added from Oscar Dowson code for JuMP at https://github.com/jump-dev/JuMP.jl/pull/2484 +# Added from Oscar Dowson's code for JuMP at https://github.com/jump-dev/JuMP.jl/pull/2484 generate_precompile = true -package_path = "C:\\Users\\wilhe\\Desktop\\Package Development" +package_path = "" example_path = "" module Foo -using JuMP -using EAGO + using JuMP + using EAGO -function stress_precompile() - for file in readdir("precompiles") - if !endswith(file, ".jl") - continue + function stress_precompile() + for file in readdir("precompiles") + if !endswith(file, ".jl") + continue + end + include(file) end - include(file) + return end - return -end -if generate_precompile - using SnoopCompile - tinf = @snoopi_deep Foo.stress_precompile() - ttot, pcs = SnoopCompile.parcel(tinf) - SnoopCompile.write("precompiles", pcs) - for file in readdir("precompiles") - if !endswith(file, ".jl") - continue + if generate_precompile + using SnoopCompile + tinf = @snoopi_deep Foo.stress_precompile() + ttot, pcs = SnoopCompile.parcel(tinf) + SnoopCompile.write("precompiles", pcs) + for file in readdir("precompiles") + if !endswith(file, ".jl") + continue + end + src = joinpath("precompiles", file) + m = match(r"precompile\_(.+)\.jl", file) + modules = split(m[1], ".") + modules = vcat(modules[1], "src", modules[2:end]) + if !(modules[1] in ["EAGO"]) + continue + end + dest = joinpath(package_path, modules..., "precompile.jl") 
+ @show dest + cp(src, dest; force = true) end - src = joinpath("precompiles", file) - m = match(r"precompile\_(.+)\.jl", file) - modules = split(m[1], ".") - modules = vcat(modules[1], "src", modules[2:end]) - if !(modules[1] in ["EAGO"]) - continue - end - dest = joinpath(package_path, modules..., "precompile.jl") - @show dest - cp(src, dest; force = true) end end \ No newline at end of file diff --git a/code_analysis/single_thread_benchmark.jl b/code_analysis/single_thread_benchmark.jl index ca4a0f98..ec73a64c 100644 --- a/code_analysis/single_thread_benchmark.jl +++ b/code_analysis/single_thread_benchmark.jl @@ -3,7 +3,7 @@ using MINLPLib, JuMP, SCIP new_lib = "ImprovedCompMult" -# build library +# Build library build_lib = false instance_names = ["bearing", "ex6_2_10", "ex6_2_10", "ex6_2_11", "ex6_2_12", "ex6_2_13", "ex7_2_4", "ex7_3_1", "ex7_3_2", "ex14_1_8", "ex14_1_9"] if build_lib @@ -13,7 +13,7 @@ if build_lib end end -# solution handler +# Solution handler struct TrialSummary was_error::Bool solver_name::String @@ -50,8 +50,8 @@ trial_summaries = TrialSummary[] # Need to assign tolerance here (since tolerances aren't standardized among solvers) function build_scip() - m = SCIP.Optimizer(limits_gap=1E-3, # absolute tolerance - limits_absgap=1E-3 # relative tolerance + m = SCIP.Optimizer(limits_gap=1E-3, # Absolute tolerance + limits_absgap=1E-3 # Relative tolerance ) return m end diff --git a/docs/Project.toml b/docs/Project.toml index 904d4c64..e7da9706 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -10,4 +10,4 @@ StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" [compat] Documenter = "^0.26" -JuMP = "1.11" +JuMP = "1.12" diff --git a/docs/make.jl b/docs/make.jl index 1ea08dbd..fd447859 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -29,35 +29,36 @@ makedocs(modules = [EAGO, McCormick], canonical = "https://PSORLab.github.io/EAGO.jl/stable/", collapselevel = 1, ), - authors = "Matthew E. 
Wilhelm", + authors = "Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, and Matthew Stuber", #repo = "https://github.com/PSORLab/EAGO.jl/blob/{commit}{path}#L{line}", - sitename = "EAGO.jl: Easy Advanced Global Optimization", + sitename = "EAGO", pages = Any["Introduction" => "index.md", - "Quick Start" => Any["Quick_Start/qs_landing.md", - "Quick_Start/guidelines.md", - "Quick_Start/starting.md", - "Quick_Start/medium.md", - "Quick_Start/custom.md" + "Quick Start" => Any["quick_start/qs_landing.md", + "quick_start/guidelines.md", + "quick_start/simple.md", + "quick_start/medium.md", + "quick_start/difficult.md" ], - "McCormick Operator Library" => Any["McCormick/overview.md", - "McCormick/usage.md", - "McCormick/operators.md", - "McCormick/type.md", - "McCormick/implicit.md" + "McCormick Operator Library" => Any["mccormick/overview.md", + "mccormick/usage.md", + "mccormick/operators.md", + "mccormick/type.md", + "mccormick/implicit.md" ], - "Global Optimizer" => Any["Optimizer/optimizer.md", - "Optimizer/bnb_back.md", - "Optimizer/relax_back.md", - "Optimizer/domain_reduction.md", - "Optimizer/high_performance.md", - "Optimizer/udf_utilities.md" + "Optimizer" => Any["optimizer/optimizer.md", + "optimizer/bnb_back.md", + "optimizer/relax_back.md", + "optimizer/domain_reduction.md", + "optimizer/high_performance.md", + "optimizer/udf_utilities.md" ], - "Semi-Infinite Programming" => "SemiInfinite/semiinfinite.md", - "Contributing to EAGO" => Any["Dev/contributing.md", - "Dev/future.md" + "Semi-Infinite Programming" => "semiinfinite/semiinfinite.md", + "Contributing to EAGO" => Any["dev/contributing.md", + "dev/future.md" ], "References" => "ref.md", - "Citing EAGO" => "cite.md"] + "Citing EAGO" => "cite.md", + "News" => "news.md"] ) @info "Deploying documentation..." diff --git a/docs/src/Dev/contributing.md b/docs/src/Dev/contributing.md deleted file mode 100644 index 6688ded5..00000000 --- a/docs/src/Dev/contributing.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to Contribute - -We're always happy to welcome work with additional collaborators and contributors. One -of the easy ways for newcomers to contribute is by adding additional relaxations. - -If you're interested in contributing in larger ways, please contact: -[**Robert Gottlieb**](https://psor.uconn.edu/person/robert-gottlieb/) - -If you have any requests for additional functionality, bug fixes, or comments, -please feel free to open a new issue using the Github [**issue tracker**](https://github.com/PSORLab/EAGO.jl/issues). diff --git a/docs/src/full_Logo1.png b/docs/src/Logo.png similarity index 100% rename from docs/src/full_Logo1.png rename to docs/src/Logo.png diff --git a/docs/src/McCormick/Figure_3.png b/docs/src/McCormick/Figure_3.png deleted file mode 100644 index ff43796e..00000000 Binary files a/docs/src/McCormick/Figure_3.png and /dev/null differ diff --git a/docs/src/McCormick/usage.md b/docs/src/McCormick/usage.md deleted file mode 100644 index 6eda0020..00000000 --- a/docs/src/McCormick/usage.md +++ /dev/null @@ -1,68 +0,0 @@ -# Basic Usage - -## Bounding a function via McCormick operators -In order to bound a function using a McCormick relaxation, you first construct a -McCormick object (`x::MC`) that bounds the input variables, and then you pass these -variables to the desired function. - -In the example below, convex/concave relaxations of the function `f(x) = x * (x-5.0) * sin(x)` -are calculated at `x = 2` on the interval `[1, 4]`. 
- -```julia -using EAGO, IntervalArithmetic - -# Define the function we want convex/concave relaxations for -f(x) = x*(x-5.0)*sin(x) - -# Create a MC object for x = 2.0 on [1.0, 4.0] to use as an -# input to f(x) -x = 2.0 # value of independent variable x -Intv = Interval(1.0,4.0) # desired interval to relax over -xMC = MC{1,NS}(x, Intv, 1) # 1-D non-smooth (NS) McCormick object, - # with a value of x and lower/upper - # bounds of Intv - -fMC = f(xMC) # relax the function by passing the MC object to it - -cv = fMC.cv # convex relaxation -cc = fMC.cc # concave relaxation -cvgrad = fMC.cv_grad # subgradient/gradient of convex relaxation -ccgrad = fMC.cc_grad # subgradient/gradient of concave relaxation -Iv = fMC.Intv # retrieve interval bounds of f(x) on Intv -``` - -By plotting the results we can easily visualize the convex and concave -relaxations, interval bounds, and affine bounds constructed using the subgradient -at the middle of X. - -![Figure_1](Figure_1.png) - - -If we instead use the constructor `xMC = MC{1,Diff}(x,Intv,1)` in the above code and re-plot, -we arrive at the following graph. Note that these relaxations are differentiable, but not as -tight as the non-smooth relaxations. - -![Figure_2](Figure_2.png) - -This functionality can be readily extended to multivariate functions as shown here: - -```julia - -f(x) = max(x[1],x[2]) - -x = [2.0 1.0] # values of independent variable x -Intv = [Interval(-4.0,5.0), Interval(-5.0,3.0)] # define intervals to relax over - -# create McCormick object -xMC = [MC{2,Diff}(x[i], Intv[i], i) for i=1:2)] - -fMC = f(xMC) # relax the function - -cv = fMC.cv # convex relaxation -cc = fMC.cc # concave relaxation -cvgrad = fMC.cv_grad # subgradient/gradient of convex relaxation -ccgrad = fMC.cc_grad # subgradient/gradient of concave relaxation -Iv = fMC.Intv # retrieve interval bounds of f(x) on Intv -``` - -![Figure_3](Figure_3.png) diff --git a/docs/src/cite.md b/docs/src/cite.md index 79ce5b68..26ebc0ca 100644 --- a/docs/src/cite.md +++ b/docs/src/cite.md @@ -4,6 +4,6 @@ Please cite the following paper when using EAGO.jl: ``` - M. E. Wilhelm & M. D. Stuber (2020) EAGO.jl: easy advanced global optimization in Julia, - Optimization Methods and Software, DOI: 10.1080/10556788.2020.1786566 +M. E. Wilhelm & M. D. Stuber (2022) EAGO.jl: easy advanced global optimization in Julia, +Optimization Methods and Software, 37:2, 425-450, DOI: 10.1080/10556788.2020.1786566 ``` diff --git a/docs/src/dev/contributing.md b/docs/src/dev/contributing.md new file mode 100644 index 00000000..91ab9a2c --- /dev/null +++ b/docs/src/dev/contributing.md @@ -0,0 +1,15 @@ +# How to Contribute + +We're always happy to welcome work with additional collaborators and contributors. One +of the easy ways for newcomers to contribute is by adding additional relaxations. + +If you have any requests for additional functionality, bug fixes, or comments, +please feel free to open a new issue using the GitHub [issue tracker](https://github.com/PSORLab/EAGO.jl/issues) or reach out to us. + +## Contact Us + +Please direct technical issues and/or bugs to the active developers: +- [Robert Gottlieb](https://psor.uconn.edu/person/robert-gottlieb/) +- [Dimitri Alston](https://psor.uconn.edu/person/dimitri-alston/) + +All other questions should be directed to [Prof. Stuber](https://chemical-biomolecular.engr.uconn.edu/person/matthew-stuber/). 
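The usage page removed above (docs/src/McCormick/usage.md) contained a multivariate snippet with a stray closing parenthesis in its array comprehension (`for i=1:2)]`). For reference, a corrected sketch of that deleted example is given below; it is illustrative only, not part of the patch, and follows the McCormick.jl API used by the new usage page later in this diff.

```julia
# Corrected sketch of the deleted multivariate example (illustrative only).
using McCormick   # Reexports IntervalArithmetic, so Interval is available

f(x) = max(x[1], x[2])

x    = [2.0, 1.0]                                  # Values of the independent variables
Intv = [Interval(-4.0, 5.0), Interval(-5.0, 3.0)]  # Intervals to relax over

# One 2-D differentiable McCormick object per variable
# (the removed snippet had an extra ')' in this comprehension)
xMC = [MC{2,Diff}(x[i], Intv[i], i) for i = 1:2]

fMC = f(xMC)             # Relax the function

cv     = fMC.cv          # Convex relaxation value
cc     = fMC.cc          # Concave relaxation value
cvgrad = fMC.cv_grad     # Subgradient of the convex relaxation
ccgrad = fMC.cc_grad     # Subgradient of the concave relaxation
Iv     = fMC.Intv        # Interval bounds of f(x) over Intv
```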
diff --git a/docs/src/Dev/future.md b/docs/src/dev/future.md similarity index 83% rename from docs/src/Dev/future.md rename to docs/src/dev/future.md index 51e42c1b..9f879d21 100644 --- a/docs/src/Dev/future.md +++ b/docs/src/dev/future.md @@ -1,14 +1,16 @@ # Future Work -## Current Activity: +## Current Activity + * Update CI testing. * Specialized algorithms for relaxing ODE constrained problems and solving global and robust optimization problems. * Extensions for nonconvex dynamic global & robust optimization. * Provide support for mixed-integer problems. -* Update EAGO to support nonsmooth problems (requires: a nonsmooth local nlp optimizer or lexiographic AD, support for relaxations is already included). +* Update EAGO to support nonsmooth problems (requires: a nonsmooth local nlp optimizer or lexicographic AD, support for relaxations is already included). * Evaluation and incorporation of implicit relaxation routines in basic solver. -## Other things on the wishlist (but not actively being worked on): +## Other Things on the Wishlist (But Not Actively Being Worked On) + * Implement the interval constraint propagation scheme presented in Vu 2008. For improved convergences. * A parametric bisection routine will be updated that can divide the `(X,P)` space into a series of boxes that all contain unique branches of the implicit function `p->y(p)`. * Provide a better interface the nonconvex semi-infinite programs solvers (JuMPeR extension?). diff --git a/docs/src/index.md b/docs/src/index.md index be87bc61..3df0206e 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,53 +1,50 @@ -![full_Logo](full_Logo1.png) +![Logo](Logo.png) -# **EAGO - Easy Advanced Global Optimization in Julia** +# EAGO - Easy Advanced Global Optimization in Julia -A flexible framework for global and robust optimization in Julia. +A development environment for robust and global optimization in Julia. ## Authors -- [Matthew Wilhelm](https://psor.uconn.edu/person/matthew-wilhelm/), Department of Chemical and Biomolecular Engineering, University of Connecticut (UCONN) -- [Robert Gottlieb](https://psor.uconn.edu/person/robert-gottlieb/), Department of Chemical and Biomolecular Engineering, University of Connecticut (UCONN) + +- [Matthew Wilhelm](https://psor.uconn.edu/person/matthew-wilhelm/), Department of Chemical and Biomolecular Engineering, University of Connecticut (UConn) + - Current Position: Alexion Pharmaceuticals +- [Robert Gottlieb](https://psor.uconn.edu/person/robert-gottlieb/), Department of Chemical and Biomolecular Engineering, University of Connecticut (UConn) +- [Dimitri Alston](https://psor.uconn.edu/person/dimitri-alston/), Department of Chemical and Biomolecular Engineering, University of Connecticut (UConn) +- [Matthew Stuber](https://chemical-biomolecular.engr.uconn.edu/person/matthew-stuber/), Associate Professor, University of Connecticut (UConn) + +If you would like to contribute, [contact us](https://psorlab.github.io/EAGO.jl/stable/dev/contributing/). ## Overview -**EAGO** is a global and robust optimization platform based on McCormick relaxations. -It contains the first widely accessible global optimization routine based on -generalized McCormick relaxations. With the exception of calls to local solvers -and linear algebra routines, EAGO is written entirely in native Julia. -The solver is flexibly arranged so the end user can easily customize low-level routines. + +EAGO is a global and robust optimization platform based on McCormick relaxations. 
It contains the first widely accessible global optimization routine based on generalized McCormick relaxations. With the exception of calls to local solvers and linear algebra routines, EAGO is written entirely in native Julia. The solver is flexibly arranged so the end user can easily customize low-level routines. ## Installing EAGO -EAGO is registered Julia package. It can be installed using the Julia package manager. -From the Julia REPL, type ] to enter the Pkg REPL mode and run the following command: -```julia +EAGO is a registered Julia package and it can be installed using the Julia package manager. +From the Julia REPL, type `]` to enter the Package manager (Pkg) mode and run the following command: + +```jldoctest pkg> add EAGO ``` -Currently, EAGO is tied version 1.0.0 - 1.1.1 of JuMP. This allows a replication -of some of the internal features shared by EAGO and JuMP's automatic differentiation -scheme, e.g., generation of Wergert Tapes, passing evaluators between JuMP and EAGO, etc. +Currently, EAGO is compatible with version 1.12+ of JuMP. This allows a replication of some of the internal features shared by EAGO and JuMP's automatic differentiation scheme, e.g., generation of Wengert Tapes, passing evaluators between JuMP and EAGO, etc. -```julia +```jldoctest pkg> add JuMP ``` -EAGO v0.7.1 is the current version, and it requires Julia 1.6+. Use with Julia 1.7 -is recommended as the majority of in-house testing occured using this version. -The user is directed to the [**High-Performance Configuration**](https://psorlab.github.io/EAGO.jl/stable/Optimizer/high_performance/) -section for instructions on how to set up a higher performance version of EAGO -(as opposed to the basic, entirely open-source version). If any issues are encountered -when loading EAGO (or when using it), please submit an issue using the Github [**issue tracker**](https://github.com/PSORLab/EAGO.jl/issues). +EAGO v0.8.1 is the current tagged version and requires Julia 1.6+ for full functionality (however Julia 1.0+ versions support partial functionality). Use with version 1.8 is recommended as the majority of in-house testing has occurred using this version of Julia. The user is directed to the [High-Performance Configuration](https://psorlab.github.io/EAGO.jl/optimizer/high_performance/) for instructions on how to install a high performance version of EAGO (rather than the basic entirely open-source version). +If any issues are encountered when loading EAGO (or when using it), please submit an issue using the GitHub [issue tracker](https://github.com/PSORLab/EAGO.jl/issues). ## Examples -Several examples are provided within this documentation, but additional examples -are provided in the form of Jupyter Notebooks at [**EAGO-notebooks**](https://github.com/PSORLab/EAGO-notebooks), -which can be run using IJulia. To add IJulia, run the command: -```julia +Several examples are provided within this documentation, but additional examples are provided in the form of Jupyter Notebooks at [EAGO-notebooks](https://github.com/PSORLab/EAGO-notebooks) which can be run using IJulia. 
To add IJulia, run the command: + +```jldoctest pkg> add IJulia ``` -Then launch the Jupyter notebook using the following command from the Julia terminal: +Then launch the Jupyter Notebook using the following command from the Julia terminal: ```julia julia> using IJulia; notebook() diff --git a/docs/src/McCormick/Figure_1.png b/docs/src/mccormick/Figure_1.png similarity index 100% rename from docs/src/McCormick/Figure_1.png rename to docs/src/mccormick/Figure_1.png diff --git a/docs/src/McCormick/Figure_2.png b/docs/src/mccormick/Figure_2.png similarity index 100% rename from docs/src/McCormick/Figure_2.png rename to docs/src/mccormick/Figure_2.png diff --git a/docs/src/mccormick/Figure_3.png b/docs/src/mccormick/Figure_3.png new file mode 100644 index 00000000..eaee7224 Binary files /dev/null and b/docs/src/mccormick/Figure_3.png differ diff --git a/docs/src/McCormick/implicit.md b/docs/src/mccormick/implicit.md similarity index 89% rename from docs/src/McCormick/implicit.md rename to docs/src/mccormick/implicit.md index ed796872..80215b13 100644 --- a/docs/src/McCormick/implicit.md +++ b/docs/src/mccormick/implicit.md @@ -1,11 +1,13 @@ # Relaxation of Implicit Functions -## High-level functions +## High-Level Functions + ```@docs implicit_relax_h! ``` -## DataStructures +## Data Structures + ```@docs McCormick.AbstractContractorMC NewtonGS @@ -17,6 +19,7 @@ MCCallback ``` ## Subroutines + ```@docs preconditioner_storage affine_exp! diff --git a/docs/src/McCormick/operators.md b/docs/src/mccormick/operators.md similarity index 73% rename from docs/src/McCormick/operators.md rename to docs/src/mccormick/operators.md index 5620a675..bc2524ab 100644 --- a/docs/src/McCormick/operators.md +++ b/docs/src/mccormick/operators.md @@ -1,48 +1,49 @@ -# Currently supported operators +# Currently Supported Operators The operators currently supported are listed below. The operators with a check box have been subject to a large degree of scrutiny and have been implemented for both forward and reverse McCormick relaxations ([Wechsung2015](https://link.springer.com/article/10.1007/s10898-015-0303-6)). Each McCormick object is associated with a parameter `T <: RelaxTag` which is either `NS` for nonsmooth relaxations ([Mitsos2009](https://epubs.siam.org/doi/abs/10.1137/080717341), [Scott2011](https://link.springer.com/article/10.1007/s10898-011-9664-7)), `MV` for multivariate relaxations ([Tsoukalas2014](https://link.springer.com/article/10.1007/s10898-014-0176-0), [Najman2017](https://link.springer.com/article/10.1007/s10898-016-0470-0)), -or `Diff` for differentiable relaxations ([Khan2016](https://link.springer.com/article/10.1007/s10898-016-0440-6), [Khan2018](https://link.springer.com/article/10.1007/s10898-017-0601-2), [Khan2019](https://www.tandfonline.com/doi/abs/10.1080/02331934.2018.1534108)). Conversion between `MV`, `NS`, and `Diff` relax tags is not currently supported. Convex and concave envelopes are used to compute relaxations of univariate functions. +or `Diff` for differentiable relaxations ([Khan2016](https://link.springer.com/article/10.1007/s10898-016-0440-6), [Khan2018](https://link.springer.com/article/10.1007/s10898-017-0601-2), [Khan2019](https://www.tandfonline.com/doi/abs/10.1080/02331934.2018.1534108)). Conversion between `NS`, `MV`, and `Diff` relax tags is not currently supported. Convex and concave envelopes are used to compute relaxations of univariate functions. 
-### **Univariate McCormick Operators** +## Univariate McCormick Operators Arbitrarily differentiable relaxations can be constructed for the following operators: -- [x] **Inverse** (inv) -- [x] **Logarithms** (log, log2, log10) -- [x] **Exponential Functions** (exp, exp2, exp10) -- [x] **Square Root** (sqrt) -- [x] **Absolute Value** (abs) +- **Inverse** (`inv`) +- **Logarithms** (`log`, `log2`, `log10`) +- **Exponential Functions** (`exp`, `exp2`, `exp10`) +- **Square Root** (`sqrt`) +- **Absolute Value** (`abs`) Both nonsmooth and Whitney-1 (once differentiable) relaxations are supported for the following operators: -- [x] **Step Functions** (step, sign) -- [x] **Trignometric Functions** (sin, cos, tan) -- [x] **Inverse Trignometric Functions** (asin, acos, atan) -- [x] **Hyperbolic Functions** (sinh, cosh, tanh) -- [x] **Inverse Hyperbolic Functions** (asinh, acosh, atanh) -- [x] **Common Activation Functions** (relu, leaky\_relu, param\_relu, sigmoid, bisigmoid, - softsign, softplus, maxtanh, pentanh, - GeLU, ELU, SELU, Swish-1) -- [x] **Special Functions** (erf) +- **Step Functions** (`step`, `sign`) +- **Trigonometric Functions** (`sin`, `cos`, `tan`) +- **Inverse Trigonometric Functions** (`asin`, `acos`, `atan`) +- **Hyperbolic Functions** (`sinh`, `cosh`, `tanh`) +- **Inverse Hyperbolic Functions** (`asinh`, `acosh`, `atanh`) +- **Common Activation Functions** (`relu`, `leaky_relu`, `param_relu`, `sigmoid`, `bisigmoid`, + `softsign`, `softplu`s, `maxtanh`, `pentanh`, + `gelu`, `elu`, `selu`, `swish`) +- **Special Functions** (`erf`) -### **Bivariate Operators: McCormick & McCormick** +## Bivariate Operators: McCormick & McCormick -The following bivariate operators are supported for two **MC** objects. Both nonsmooth and Whitney-1 (once differentiable) relaxations are supported. +The following bivariate operators are supported for two `MC` objects. Both nonsmooth and Whitney-1 (once differentiable) relaxations are supported. -- [x] **multiplication** (\*) -- [x] **division** (/) +- **Multiplication** (`*`) +- **Division** (`/`) Arbitrarily differentiable relaxations can be constructed for the following operators: -- [x] **addition** (+) -- [x] **subtraction** (-) -- [x] **minimization** (min) -- [x] **maximization** (max) +- **Addition** (`+`) +- **Subtraction** (`-`) +- **Minimization** (`min`) +- **Maximization** (`max`) + +## Common Subexpressions -### Common subexpressions The following functions can be used in place of common subexpressions encountered in optimization and will result in improved performance (in each case, the standard McCormick composition rules are often more expansive). @@ -53,7 +54,8 @@ xlogx mm ``` -### Bound Setting Functions +## Bound Setting Functions + The following functions are used to specify that known bounds on a subexpression exist and that the relaxation/interval bounds propagated should make use of this information. The utility functions can be helpful in avoiding domain violations @@ -68,7 +70,8 @@ upper_bnd bnd ``` -### Specialized Activation Functions +## Specialized Activation Functions + ```@docs pentanh leaky_relu @@ -76,7 +79,8 @@ bnd maxtanh ``` -### References +## References + - **Khan KA, Watson HAJ, Barton PI (2017).** Differentiable McCormick relaxations. *Journal of Global Optimization*, 67(4): 687-729. - **Khan KA, Wilhelm ME, Stuber MD, Cao H, Watson HAJ, Barton PI (2018).** Corrections to: Differentiable McCormick relaxations. *Journal of Global Optimization*, 70(3): 705-706. 
- **Khan KA (2019).** Whitney differentiability of optimal-value functions for bound-constrained convex programming problems. *Optimization*, 68(2-3): 691-711 diff --git a/docs/src/McCormick/overview.md b/docs/src/mccormick/overview.md similarity index 81% rename from docs/src/McCormick/overview.md rename to docs/src/mccormick/overview.md index 19a4d2d9..37a224f1 100644 --- a/docs/src/McCormick/overview.md +++ b/docs/src/mccormick/overview.md @@ -1,13 +1,16 @@ # Overview EAGO provides a library of McCormick relaxations in native Julia code. The EAGO optimizer supports -relaxing functions using **nonsmooth McCormick relaxations** ([Mitsos2009](https://epubs.siam.org/doi/abs/10.1137/080717341), [Scott2011](https://link.springer.com/article/10.1007/s10898-011-9664-7)), **smooth McCormick relaxations** ([Khan2016](https://link.springer.com/article/10.1007/s10898-016-0440-6), [Khan2018](https://link.springer.com/article/10.1007/s10898-017-0601-2), [Khan2019](https://www.tandfonline.com/doi/abs/10.1080/02331934.2018.1534108)), and **multi-variant McCormick relaxations** ([Tsoukalas2014](https://link.springer.com/article/10.1007/s10898-014-0176-0); a variant of **subgradient-based interval refinement** ([Najman2017](https://link.springer.com/article/10.1007/s10898-016-0470-0))). For functions with arbitrarily differentiable relaxations, the differentiable constant μ can be modified by adjusting a constant value in the package. Additionally, validated and nonvalidated interval bounds are supported via [**IntervalArithmetic.jl**](https://github.com/JuliaIntervals/IntervalArithmetic.jl) which is reexported. The basic McCormick operator and reverse McCormick operator ([Wechsung2015](https://link.springer.com/article/10.1007/s10898-015-0303-6)) libraries are included in two dependent subpackages which can loaded and used independently: -- **[McCormick.jl](https://github.com/PSORLab/McCormick.jl)**: A library of forward-mode and implciit McCormick operators. -- **[ReverseMcCormick.jl](https://github.com/PSORLab/ReverseMcCormick.jl)**: A reverse-mode McCormick operator library. +relaxing functions using **nonsmooth McCormick relaxations** ([Mitsos2009](https://epubs.siam.org/doi/abs/10.1137/080717341), [Scott2011](https://link.springer.com/article/10.1007/s10898-011-9664-7)), **smooth McCormick relaxations** ([Khan2016](https://link.springer.com/article/10.1007/s10898-016-0440-6), [Khan2018](https://link.springer.com/article/10.1007/s10898-017-0601-2), [Khan2019](https://www.tandfonline.com/doi/abs/10.1080/02331934.2018.1534108)), and **multi-variant McCormick relaxations** ([Tsoukalas2014](https://link.springer.com/article/10.1007/s10898-014-0176-0); a variant of **subgradient-based interval refinement** ([Najman2017](https://link.springer.com/article/10.1007/s10898-016-0470-0))). For functions with arbitrarily differentiable relaxations, the differentiable constant μ can be modified by adjusting a constant value in the package. Additionally, validated and nonvalidated interval bounds are supported via [IntervalArithmetic.jl](https://github.com/JuliaIntervals/IntervalArithmetic.jl) which is reexported. The basic McCormick operator and reverse McCormick operator ([Wechsung2015](https://link.springer.com/article/10.1007/s10898-015-0303-6)) libraries are included in two dependent subpackages which can loaded and used independently: +- [McCormick.jl](https://github.com/PSORLab/McCormick.jl): A library of forward-mode and implicit McCormick operators. 
+- [ReverseMcCormick.jl](https://github.com/PSORLab/ReverseMcCormick.jl): A reverse-mode McCormick operator library. ## NaN Numerics + When a relaxation is computed at an undefined point or over an unbounded domain, the resulting relaxation is defined as "not a number" (`NaN`) rather than throwing an error. This allows algorithms to check for these cases without resorting to `try-catch` statements. Moreover, when the interval domain is extensive enough to cause a domain violation, an `x::MC` structure is returned that satisfies `isnan(x) === true`. +## References + - **Khan KA, Watson HAJ, Barton PI (2017).** Differentiable McCormick relaxations. *Journal of Global Optimization*, 67(4): 687-729. - **Khan KA, Wilhelm ME, Stuber MD, Cao H, Watson HAJ, Barton PI (2018).** Corrections to: Differentiable McCormick relaxations. *Journal of Global Optimization*, 70(3): 705-706. - **Khan KA (2019).** Whitney differentiability of optimal-value functions for bound-constrained convex programming problems. *Optimization*, 68(2-3): 691-711 diff --git a/docs/src/McCormick/type.md b/docs/src/mccormick/type.md similarity index 95% rename from docs/src/McCormick/type.md rename to docs/src/mccormick/type.md index c6b9d9fd..156d5bb2 100644 --- a/docs/src/McCormick/type.md +++ b/docs/src/mccormick/type.md @@ -6,11 +6,13 @@ McCormick.RelaxTag ``` ## Constructors for MC + ```@docs MC{N,T}(y::Float64) ``` ## Internal Utilities + ```@docs mid3 mid3v @@ -24,7 +26,7 @@ McCormick.golden_section_it McCormick.golden_section ``` -## (Under development) MCNoGrad +## (Under Development) MCNoGrad A handful of applications make use of McCormick relaxations directly without the need for subgradients. We are currently adding support for a McCormick `struct` which omits subgradient propagation in favor of return diff --git a/docs/src/mccormick/usage.md b/docs/src/mccormick/usage.md new file mode 100644 index 00000000..79c70104 --- /dev/null +++ b/docs/src/mccormick/usage.md @@ -0,0 +1,77 @@ +# Basic Usage + +## Bounding a Function via McCormick Operators + +In order to bound a function using a McCormick relaxation, you first construct a +McCormick object (`x::MC`) that bounds the input variables, and then you pass these +variables to the desired function. + +In the example below, convex/concave relaxations of the function `f(x) = x(x-5)sin(x)` +are calculated at `x = 2` on the interval `[1,4]`. + +```julia +using McCormick + +# Create MC object for x = 2.0 on [1.0,4.0] for relaxing +# a function f(x) on the interval Intv + +f(x) = x*(x-5.0)*sin(x) + +x = 2.0 # Value of independent variable x +Intv = Interval(1.0,4.0) # Define interval to relax over + # Note that McCormick.jl reexports IntervalArithmetic.jl + # and StaticArrays. So no using statement for these is + # necessary. +# Create McCormick object +xMC = MC{1,NS}(x,Intv,1) + +fMC = f(xMC) # Relax the function + +cv = fMC.cv # Convex relaxation +cc = fMC.cc # Concave relaxation +cvgrad = fMC.cv_grad # Subgradient/gradient of convex relaxation +ccgrad = fMC.cc_grad # Subgradient/gradient of concave relaxation +Iv = fMC.Intv # Retrieve interval bounds of f(x) on Intv +``` + +By plotting the results we can easily visualize the convex and concave +relaxations, interval bounds, and affine bounds constructed using the subgradient +at the middle of X. + +![Figure_1](Figure_1.png) + +If we instead use the constructor `xMC = MC{1,Diff}(x,Intv,1)` in the above code and re-plot, +we arrive at the following graph. 
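+Only the constructor call changes; a minimal sketch of the modified lines (reusing `f`, `x`, and `Intv` from the script above) is:
+
+```julia
+xMC = MC{1,Diff}(x, Intv, 1)  # Whitney-1 (differentiable) relaxation tag instead of NS
+fMC = f(xMC)                  # Relax the function exactly as before
+```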
Note that these relaxations are differentiable, but not as +tight as the nonsmooth relaxations. + +![Figure_2](Figure_2.png) + +This can readily be extended to multivariate functions, for example, `f(x,y) = (4 - 2.1x^2 + (x^4)/6)x^2 + xy + (-4 + 4y^2)y^2`: + +```julia +using McCormick + +# Define function +f(x,y) = (4.0 - 2.1*x^2 + (x^4)/6.0)*x^2 + x*y + (-4.0 + 4.0*y^2)*y^2 + +# Define intervals for independent variables +n = 30 +X = Interval{Float64}(-2,0) +Y = Interval{Float64}(-0.5,0.5) +xrange = range(X.lo,stop=X.hi,length=n) +yrange = range(Y.lo,stop=Y.hi,length=n) + +# Calculate differentiable McCormick relaxation +for (i,x) in enumerate(xrange) + for (j,y) in enumerate(yrange) + z = f(x,y) # Calculate function values + xMC = MC{1,Diff}(x,X,1) # Differentiable relaxation for x + yMC = MC{1,Diff}(y,Y,2) # Differentiable relaxation for y + fMC = f(xMC,yMC) # Relax the function + cv = fMC.cv # Convex relaxation + cc = fMC.cc # Concave relaxation + end +end +``` + +![Figure_3](Figure_3.png) diff --git a/docs/src/news.md b/docs/src/news.md new file mode 100644 index 00000000..ee7ed449 --- /dev/null +++ b/docs/src/news.md @@ -0,0 +1,154 @@ +# News for EAGO Releases + +## [v0.8.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.8.1) (June 15, 2023) + +- Resolved an issue where integer and binary variables would sometimes throw a `MathOptInterface.UpperBoundAlreadySet` error. +- Added the function `unbounded_check!` which warns users if they are missing variable bounds and sets them to +/- 1E10 by default. + - Added an EAGO parameter `unbounded_check` which defaults to `true` and enables `unbounded_check!`. +- Bumped requirement for PrettyTables.jl to v2+ to accommodate the latest version of DataFrames.jl. + +## [v0.8.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.8.0) (June 12, 2023) + +- Updated EAGO for compatibility with the nonlinear expression API changes introduced in JuMP v1.2: https://discourse.julialang.org/t/ann-upcoming-refactoring-of-jumps-nonlinear-api/83052. + - EAGO now uses the `MOI.Nonlinear` submodule instead of `JuMP._Derivatives`. + - Models, nodes, expressions, constraints, and operators are now compatible with MOI. +- Added logic and comparison operators to `EAGO.OperatorRegistry`. + +## [v0.7.3](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.7.3) (April 11, 2023) + +- Bumped DocStringExtensions.jl compatibility. + +## [v0.7.2](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.7.2) (November 22, 2022) + +- Added support for Julia 1.7. +- Bumped NaNMath.jl compatibility. +- Added `help?` information for various functions and structures. +- Updated documentation and some formatting. + +## [v0.7.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.7.1) (June 26, 2022) + +- Added the function `print_problem_summary`, an internal script used to display all constraints, objectives in a linear program which is added to functions for debug purposes while writing code. +- Adjusted default `EAGOParameters`. + - `branch_cvx_factor`: 0.5 => 0.25 + - `branch_offset`: 0.2 => 0.15 + - `time_limit` and `_time_left`: 1000.0 => 3600.0 + - `obbt_depth`: 0 => 6 + - `obbt_repetitions`: 1 => 3 + - `cut_tolerance_rel`: 1E-2 => 1E-3 +- Adjusted `Ipopt.Optimizer` attributes. + - `max_iter`: 20000 => 10000 + - `acceptable_iter`: 10000 => 1000 +- Excluded `test_quadratic_nonconvex_constraint_basic` from MOI tests. +- Restricted JuMP compatibility to 1.0.0 - 1.1.1. 
+ +## [v0.7.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.7.0) (March 28, 2022) + +- Added envelopes of activation functions: `xabsx`, `logcosh` +- Added `estimator_extrema`, `estimator_under`, and `estimator_over` functions for McCormick relaxations. +- Moved various functions and related structures to new files. +- Added `RelaxCache` structure to hold relaxed problem information. +- Updated forward and reverse propagation. +- Added PrettyTables.jl. +- Added test examples. +- Added a memory allocation analysis. +- Updated documentation. + +## [v0.6.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.6.1) (March 4, 2021) + +- Minor update to tests. + +## [v0.6.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.6.0) (February 19, 2021) + +- License changed from CC BY-NC-SA 4.0 to MIT. +- Fix deprecated Ipopt constructor. +- Fix discrepancy between the returned objective value and the objective evaluated at the solution. +- Dramatically decrease allocates and first-run performance of SIP routines. +- Add two algorithms which modify `SIPRes` detailed in Djelassi, H. and Mitsos A. 2017. +- Fix objective interval fallback function. +- New SIP interface with extendable subroutines. +- Fix x^y relaxation bug. +- Add issues template. +- Add SIP subroutine documentation. + +## [v0.5.2](https://github.com/PSORLab/EAGO.jl/commit/bc59c5a8a5e26960c159e06e7b26e2e5c2472956) (November 18, 2020) + +- Fix user specified branching variables. + +## [v0.5.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.5.1) (November 18, 2020) + +- Support for Julia ~1 (with limited functionality for Julia 1.0 and 1.1). + +## [v0.5.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.5.0) (November 18, 2020) + +- Introduces the `register_eago_operators!(m::JuMP.Model)` which can be used to register all nonstandard nonlinear terms used in EAGO in any JuMP model. +- Introduces `positive`, `negative`, `lower_bnd`, `upper_bnd`, and `bnd` functions which can be used to enforce bounds on intermediate terms in nonlinear expressions (`EAGO.Optimizer` only). +- Adds envelopes: `abs2`, `sinpi`, `cospi`, `fma`, `cbrt` +- Adds envelopes and functions: `xlogx` +- Adds envelopes of special functions: `erf`, `erfc`, `erfinv`, `erfcinv` +- Adds envelopes of activation functions: `relu`, `gelu`, `elu`, `selu`, `swish`, `sigmoid`, `softsign`, `softplus`, `bisigmoid`, `pentanh`, `leaky_relu`, `param_relu` +- Error messages in `sip_explicit` have been made more transparent. +- Fixes some issues with documentation image rendering and links. +- Drops appveyor CI and Travis CI in favor of GitHub Actions. + +## [v0.4.2](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.4.2) (August 28, 2020) + +- Support for Julia 1.5. + +## [v0.4.1](https://github.com/PSORLab/EAGO.jl/commit/9c1bcf024a19840a0ac49c8c6da13619a5f3845f#comments) (June 17, 2020) + +- Minor bug fixes. + +## [v0.4.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.4.0) (June 12, 2020) + +- Support for new MOI/JuMP `RawParameter` input and a number of new attributes. +- Separates McCormick and ReverseMcCormick libraries (now [McCormick.jl](https://github.com/PSORLab/McCormick.jl) and [ReverseMcCormick.jl](https://github.com/PSORLab/ReverseMcCormick.jl)) from main package. McCormick.jl is reexported. +- Relaxation calculations now return NaN values on a domain violation. +- Tolerance based validation of cuts has been added to generate numerically safe cuts. 
+- Significantly simplify internal codebase for `EAGO.Optimizer` (no changes to API): fully decouples input problem specifications from the formulation used internally, stack only stores variables that are branched on, and a number of internal rearrangements to clearly delineate different routines. +- Add problem classification preprocessing that throws to simpler routines if LP problem types are detected (enables future support for SOCP, MILP, MISOCP, and Convex forms). +- Fix multiple bugs and add more transparent error codes. + +## [v0.3.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.3.1) (January 29, 2020) + +- Add unit tests. +- Support for Julia 1.3. +- Fix IntervalContractors.jl dependency issue. + +## [v0.3.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.3.0) (November 5, 2019) + +This update is intended to be the last to create a large number of breaking changes to the EAGO API. Please review the use cases provided in the documentation to update examples. +- A number of performance improvements have been made to the underlying McCormick relaxation library. +- The optimizer used to construct relaxations is now modified in place. +- All subproblem storage has been moved to the `Optimizer` object and storage types (e.g. `LowerInfo`) have been removed. +- A `BinaryMinMaxHeap` structure is now used to store nodes. +- Speed and aesthetics for logging and printing utilities have been updated. +- Subroutines are now customized by creating a subtype of `ExtensionType` and defining subroutines which dispatch on this new structure. +- Parametric interval methods and the Implicit optimizer have been move to a separate package (to be tagged shortly). +- JIT compilation time has been reduced substantially. +- Support for silent tag and time limits. + +## [v0.2.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.2.1) (July 7, 2019) + +- Bug fix for explicit SIP solving routine that occurred for uncertainty sets of dimension greater than 1. +- Bug fix for `MOI.MAX_SENSE` (max objective sense). + +## [v0.2.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.2.0) (June 14, 2019) + +This update creates a number of breaking changes to the EAGO API. Please review the use cases provided in the documentation to update examples. +- Updated to support Julia 1.0+, MathOptInterface (MOI), and MOI construction of subproblems. +- Additional domain reduction routines available. +- Support for specialized handling of linear and quadratic terms. +- Significant performance improvements due to pre-allocation of Wengert tapes and MOI support. +- A more intuitive API for McCormick relaxation construction. + +## [v0.1.2](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.1.2) (June 20, 2018) + +- Significant speed and functionality updates. + +## [v0.1.1](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.1.1) (June 7, 2018) + +- Initial release of combined EAGO packages. + +## [v0.1.0](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.1.0) (April 10, 2018) + +- Main global solver release. 
diff --git a/docs/src/Optimizer/bnb_back.md b/docs/src/optimizer/bnb_back.md similarity index 92% rename from docs/src/Optimizer/bnb_back.md rename to docs/src/optimizer/bnb_back.md index e25c38c3..7dc9fb71 100644 --- a/docs/src/Optimizer/bnb_back.md +++ b/docs/src/optimizer/bnb_back.md @@ -4,6 +4,7 @@ This component is meant to provide a flexible framework for implementing spatial All components of the branch-and-bound routine can be customized by the individual user: lower bounding problem, upper bounding problem. ## Branch and Bound Node Storage + ```@docs EAGO.NodeBB ``` @@ -14,9 +15,10 @@ The global optimizer structure holds all information relevant to branch-and-boun EAGO.GlobalOptimizer ``` -# Customizable subroutines +# Customizable Subroutines + +## Stack Management Subroutines -## Stack management subroutines ```@docs EAGO.branch_node!(t::ExtensionType, m::GlobalOptimizer) EAGO.select_branch_variable(t::ExtensionType, m::GlobalOptimizer) @@ -27,7 +29,8 @@ The global optimizer structure holds all information relevant to branch-and-boun EAGO.single_storage!(t::ExtensionType, m::GlobalOptimizer) ``` -## Internal Subproblem Status Codes & Subsolver Management +## Internal Subproblem Status Codes and Subsolver Management + ```@docs EAGO.RelaxResultStatus EAGO.LocalResultStatus @@ -36,7 +39,8 @@ The global optimizer structure holds all information relevant to branch-and-boun EAGO.set_default_config!(t::ExtensionType, m::GlobalOptimizer) ``` -## Main subproblem and termination subroutines +## Main Subproblem and Termination Subroutines + ```@docs EAGO.convergence_check(t::ExtensionType, m::GlobalOptimizer) EAGO.cut_condition(t::ExtensionType, m::GlobalOptimizer) @@ -51,6 +55,7 @@ The global optimizer structure holds all information relevant to branch-and-boun ``` ## Internal Subroutines + ```@docs EAGO.is_integer_subproblem(m) EAGO.is_integer_feasible_local(m::GlobalOptimizer, d) @@ -69,7 +74,8 @@ The global optimizer structure holds all information relevant to branch-and-boun EAGO.local_problem_status!(t::MathOptInterface.TerminationStatusCode, r::MathOptInterface.ResultStatusCode) ``` -## Functions for generating console output +## Functions for Generating Console Output + ```@docs EAGO.print_iteration! EAGO.print_node! @@ -77,13 +83,15 @@ The global optimizer structure holds all information relevant to branch-and-boun EAGO.print_solution! ``` -## Support for log output at each iteration +## Support for Log Output at Each Iteration + ```@docs EAGO.Log EAGO.log_iteration!(x::GlobalOptimizer) ``` ## Interval Representations of Expressions + ```@docs EAGO.AbstractEAGOConstraint EAGO.AffineFunctionEq diff --git a/docs/src/Optimizer/domain_reduction.md b/docs/src/optimizer/domain_reduction.md similarity index 89% rename from docs/src/Optimizer/domain_reduction.md rename to docs/src/optimizer/domain_reduction.md index 783ac92a..fb8fc173 100644 --- a/docs/src/Optimizer/domain_reduction.md +++ b/docs/src/optimizer/domain_reduction.md @@ -1,6 +1,7 @@ # Domain Reduction ## Duality-Based Bound Tightening + Variable bound tightening based on the duality multipliers are supported. ```@docs @@ -8,6 +9,7 @@ variable_dbbt! ``` ## Special Forms + Bound tightening for linear forms, univariate quadratic forms, and bivariate quadratic forms are also supported. @@ -16,8 +18,9 @@ EAGO.fbbt! ``` ## Constraint Propagation + EAGO contains a constraint propagation architecture that supported forward and -reverse evaluation of set-valued functions on the directed acyclic graph (DAG). 
+reverse evaluation of set-valued functions on the directed acyclic graph. The interval contractor and reverse McCormick relaxation-based contractors are currently available. @@ -29,7 +32,7 @@ EAGO.set_constraint_propagation_fbbt! EAGO makes use of an optimization-based bound tightening scheme using filtering and greedy ordering as detailed in: Gleixner, A.M., Berthold, T., Müller, B. -et al. J Glob Optim (2017) 67: 731. https://doi.org/10.1007/s10898-016-0450-4. +et al. J Glob Optim (2017) 67: 731. [https://doi.org/10.1007/s10898-016-0450-4](https://doi.org/10.1007/s10898-016-0450-4). ```@docs EAGO.obbt! diff --git a/docs/src/Optimizer/high_performance.md b/docs/src/optimizer/high_performance.md similarity index 66% rename from docs/src/Optimizer/high_performance.md rename to docs/src/optimizer/high_performance.md index c0817311..bf534e27 100644 --- a/docs/src/Optimizer/high_performance.md +++ b/docs/src/optimizer/high_performance.md @@ -5,13 +5,11 @@ By default, EAGO uses GLPK for solving linear subproblems introduced. Using a commercial linear solver is highly recommended such as Gurobi, CPLEX, or XPRESS is highly recommended. Both Gurobi and CPLEX are free for academics and -installation information can be found through http://www.gurobi.com/academia/academia-center and -https://www.ibm.com/developerworks/community/blogs/jfp/entry/CPLEX_Is_Free_For_Students?lang=en, respectively. +installation information can be found on the [Gurobi website](http://www.gurobi.com/academia/academia-center) and the [IBM website](https://www.ibm.com/developerworks/community/blogs/jfp/entry/CPLEX_Is_Free_For_Students?lang=en), respectively. A non-default LP solver can then be selected by the user via a series of keyword argument inputs as illustrated in the code snippet below. The `relaxed_optimizer` contains an instance optimizer with valid relaxations that are made at the root node and is updated with affine relaxations in place. ```julia - # Create opt EAGO Optimizer with Gurobi as a lower subsolver subsolver_config = SubSolvers(relaxed_optimizer = Gurobi.Optimizer(OutputFlag=0)) eago_factory = () -> EAGO.Optimizer(subsolvers = subsolver_config) m = Model(eago_factory) ``` ## Rounding Mode -The `IntervalArithmetic.jl` package supports a number of different directed rounding +The [IntervalArithmetic.jl](https://github.com/JuliaIntervals/IntervalArithmetic.jl) package supports a number of different directed rounding modes. The default directed rounding mode is `:tight`. It is recommended that the user specify that `:accurate` directed rounding mode be used as it may results in a significant performance improvement. Setting a rounding mode can requires @@ -35,22 +33,21 @@ using EAGO # REST OF CODE ``` - ## Ipopt Build Ipopt is the recommended solver for upper bounding problems. Ipopt's performance is highly dependent on the linear algebra package used (up to 30x). By default MUMPS is used. -It's recommended that you either compile Ipopt with HSL MA57 or the Pardiso linear +It is recommended that you either compile Ipopt with HSL MA57 or the Pardiso linear algebra packages with a machine specific Blas library (for Intel users the JuliaPro MKL version is recommended).
For information on this, see the below links: -- Compiling Ipopt: https://www.coin-or.org/Ipopt/documentation/node13.html -- Julia Specifics: +- [Compiling Ipopt](https://www.coin-or.org/Ipopt/documentation/node13.html) +- Julia specifics: - Pointing Ipopt to a compiled version: - - Ipopt Package Info: https://github.com/JuliaOpt/Ipopt.jl - - Discourse discussion: https://discourse.julialang.org/t/use-ipopt-with-custom-version/9176 + - [Ipopt Package Info](https://github.com/JuliaOpt/Ipopt.jl) + - [Discourse discussion](https://discourse.julialang.org/t/use-ipopt-with-custom-version/9176) - Issues using Pardiso: - - Ubuntu: https://github.com/JuliaOpt/Ipopt.jl/issues/106 - - Windows: https://github.com/JuliaOpt/Ipopt.jl/issues/83 -- HSL Website: http://www.hsl.rl.ac.uk/ipopt/ -- Pardiso Website: https://pardiso-project.org/ + - [Ubuntu](https://github.com/JuliaOpt/Ipopt.jl/issues/106) + - [Windows](https://github.com/JuliaOpt/Ipopt.jl/issues/83) +- [HSL website](http://www.hsl.rl.ac.uk/ipopt/) +- [Pardiso website](https://pardiso-project.org/) diff --git a/docs/src/Optimizer/optimizer.md b/docs/src/optimizer/optimizer.md similarity index 80% rename from docs/src/Optimizer/optimizer.md rename to docs/src/optimizer/optimizer.md index 48e0532d..70bed03b 100644 --- a/docs/src/Optimizer/optimizer.md +++ b/docs/src/optimizer/optimizer.md @@ -4,11 +4,13 @@ The `EAGO.Optimizer` object holds all algorithm solution information. A descript of all user-facing options has been provided in the docstring. ## EAGO.Optimizer + ```@docs Optimizer ``` -## EAGO Specific functions and operators +## EAGO Specific Functions and Operators + EAGO supports a number of functions and operators that for which specialized relaxation routines are available. These can be registered and added to a JuMP model using the function @@ -18,23 +20,27 @@ EAGO.register_eago_operators!(m::JuMP.Model) ``` ## Storage for Input Parameters + ```@docs EAGO.EAGOParameters ``` ## Internal Storage Structures + ```@docs VariableInfo ExtensionType ``` ## Internal Problem Representations + ```@docs EAGO.InputProblem EAGO.ParsedProblem ``` ## Interval Optimizer Subroutines + ```@docs EAGO.initial_parse!(m::Optimizer{R,S,T}) where {R,S,T} ``` @@ -47,7 +53,7 @@ which is a subtype of `EAGO.ExtensionType` and overloading methods associated wi this new structure. An instance of this new structure is provided to the `EAGO.Optimizer` using the `ext_type` keyword. This results in EAGO now dispatch to the new methods rather than the generally defined methods for the parent type. For a complete -example, the reader is directed to the [**interval bounding example**](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/nlpopt_interval_bnb.ipynb) and [**quasiconvex example**](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/custom_quasiconvex.ipynb). Alternatively, the user can overload the `optimize_hook!` for +example, the reader is directed to the [interval bounding example](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/nlpopt_interval_bnb.ipynb) and [quasiconvex example](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/custom_quasiconvex.ipynb). Alternatively, the user can overload the `optimize_hook!` for this subtype which will entirely circumvent the default global solution routine. Additional information can be stored in the `ext` field of EAGO. 
In order to allow for compatibility between packages the user is encouraged to append their extension name to the start of each diff --git a/docs/src/Optimizer/relax_back.md b/docs/src/optimizer/relax_back.md similarity index 96% rename from docs/src/Optimizer/relax_back.md rename to docs/src/optimizer/relax_back.md index 4c91e3e6..9e6d5cba 100644 --- a/docs/src/Optimizer/relax_back.md +++ b/docs/src/optimizer/relax_back.md @@ -2,7 +2,7 @@ ## Graphs, Caches, Forward and Reverse Propagation -EAGO makes use of a specialized tape structure for each function in order to compute valid composite bounds and relaxations. Each variable, constant, and expression is respresented by a node in a directed graph structure. +EAGO makes use of a specialized tape structure for each function in order to compute valid composite bounds and relaxations. Each variable, constant, and expression is represented by a node in a directed graph structure. ```@docs EAGO.Node @@ -62,7 +62,8 @@ Forward and reverse subroutines are overloaded for individual operators using th `fprop!(t::AbstractCacheAttribute, v::Val{AtomType}, g::AbstractDirectedGraph, b::AbstractCache, k::Int)` and `rprop!(t::AbstractCacheAttribute, v::Val{AtomType}, g::AbstractDirectedGraph, b::AbstractCache, k::Int)`. -## Other routines +## Other Routines + ```@docs EAGO.is_safe_cut!(m::GlobalOptimizer, f::MathOptInterface.ScalarAffineFunction{Float64}) -``` \ No newline at end of file +``` diff --git a/docs/src/Optimizer/udf_utilities.md b/docs/src/optimizer/udf_utilities.md similarity index 78% rename from docs/src/Optimizer/udf_utilities.md rename to docs/src/optimizer/udf_utilities.md index 380c10d4..54ad3234 100644 --- a/docs/src/Optimizer/udf_utilities.md +++ b/docs/src/optimizer/udf_utilities.md @@ -1,10 +1,11 @@ -# User-Define Functions and DAG Utilities +# User-Defined Functions (UDFs) and Directed Acyclic Graph (DAG) Utilities EAGO has included basic functionality to manipulate user-defined functions. These features are largely experimental and we're interested in providing additional for novel use cases. ## DAG Substitution and Flattening + ```@docs dag_flattening! flatten_expression! @@ -13,7 +14,8 @@ Template_Graph Template_Node ``` -## User-Defined Function (UDF) Scrubber +## UDF Scrubber + ```@docs scrub scrub! diff --git a/docs/src/Quick_Start/Equation_1.png b/docs/src/quick_start/Equation_1.png similarity index 100% rename from docs/src/Quick_Start/Equation_1.png rename to docs/src/quick_start/Equation_1.png diff --git a/docs/src/Quick_Start/custom.md b/docs/src/quick_start/difficult.md similarity index 82% rename from docs/src/Quick_Start/custom.md rename to docs/src/quick_start/difficult.md index 76752146..ab43db64 100644 --- a/docs/src/Quick_Start/custom.md +++ b/docs/src/quick_start/difficult.md @@ -1,14 +1,14 @@ -# Simple Example +# Difficult/Developer Example -(This example is also provided [here as a Jupyter Notebook](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/nlpopt_explicit_ann.ipynb)) +This example is also provided [here as a Jupyter Notebook](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/nlpopt_explicit_ann.ipynb) -In [1,2], a surrogate ANN model of bioreactor productivity was constructed by fitting -results from computationally expensive CFD simulations. The author then optimized this +A surrogate ANN model of bioreactor productivity was constructed by fitting +results from computationally expensive CFD simulations1,2. 
The authors then optimized this surrogate model to obtain ideal processing conditions. The optimization problem is given by: ![Equation 1](Equation_1.png) -## Input parameters +## Input Parameters In the first block, we input parameters values supplied in the paper for $W_1$, $W_2$, $B_1$, and $B_2$ into Julia as simple array objects. We also input bounds for the variables @@ -37,11 +37,11 @@ xLBD = [0.623, 0.093, 0.259, 6.56, 1114, 0.013, 0.127, 0.004] xUBD = [5.89, 0.5, 1.0, 90, 25000, 0.149, 0.889, 0.049]; ``` -## Construct the JuMP model and optimize +## Construct the JuMP Model and Optimize -We now formulate the problem using standard JuMP[3] syntax and optimize it. Note that +We now formulate the problem using standard JuMP3 syntax and optimize it. Note that we are forming an NLexpression object to handle the summation term to keep the code -visually simple, but this could be placed directly in the JuMP @NLobjective expression +visually simple, but this could be placed directly in the JuMP `@NLobjective` expression instead. ```julia @@ -55,7 +55,7 @@ model = Model(optimizer_with_attributes(EAGO.Optimizer, "absolute_tolerance" => optimize!(model) ``` -## Retrieve results +## Retrieve Results We then recover the objective value, the solution value, and termination status codes using standard JuMP syntax. The optimal value and solution values are then rescaled @@ -80,7 +80,8 @@ println("The rescaled optimal value is: $(round(rescaled_fval,digits=4))") println("The rescaled solution is $(round.(rescaled_xsol,digits=3)).") ``` -## Reference: +## References + 1. J. D. Smith, A. A. Neto, S. Cremaschi, and D. W. Crunkleton, CFD-based optimization of a flooded bed algae bioreactor, *Industrial & Engineering Chemistry Research*, 52 (2012), pp. 7181–7188 2. A. M. Schweidtmann and A. Mitsos. Global Deterministic Optimization with Artificial Neural Networks Embedded [https://arxiv.org/pdf/1801.07114.pdf](https://arxiv.org/pdf/1801.07114.pdf) 3. Iain Dunning and Joey Huchette and Miles Lubin. JuMP: A Modeling Language for Mathematical Optimization, *SIAM Review*, 59 (2017), pp. 295-320. diff --git a/docs/src/Quick_Start/guidelines.md b/docs/src/quick_start/guidelines.md similarity index 97% rename from docs/src/Quick_Start/guidelines.md rename to docs/src/quick_start/guidelines.md index 53386174..6b7be101 100644 --- a/docs/src/Quick_Start/guidelines.md +++ b/docs/src/quick_start/guidelines.md @@ -20,6 +20,7 @@ struct MyNewStruct <: EAGO.ExtensionType end To let EAGO know that you would like to use this extension (and any functions you overload), when you create the JuMP model, declare your new type in the SubSolvers field of EAGO's optimizer as follows: + ```julia using JuMP @@ -55,9 +56,9 @@ The user-defined preprocessing step can be as simple or complex as desired, but if `_preprocess_feasibility` is not set to `true`, EAGO will assume each node is infeasible. -## 3) Lower problem +## 3) Lower Problem -By default, EAGO applies Kelley's cutting-plane algorithm[1] to solve the lower bounding +By default, EAGO applies Kelley's cutting-plane algorithm1 to solve the lower bounding problem. This can be overloaded using the same syntax as for the other functions. Necessary changes to the `EAGO.GlobalOptimizer` that occur within the lower problem are changing the `_lower_objective_value` and `_lower_feasibility` fields. 
If the @@ -81,7 +82,7 @@ an algorithm that also calculates an upper objective value, the necessary fields to update in `upper_problem!` can simply be updated here, and the `upper_problem!` can be overloaded by a function that does `nothing`. -## 4) Upper problem +## 4) Upper Problem By default, the upper bounding problem is run on every node up to depth `upper_bounding_depth`, and is triggered with a probability of `0.5^(depth - upper_bounding_depth)` @@ -119,7 +120,7 @@ definition for the `lower_problem!`, the `_upper_feasibility` flag must be set to `true`. If this is not done, the change to the `_upper_objective_value` will be discarded. -## 5) Convergence check +## 5) Convergence Check By default, EAGO checks to see if the lower and upper bounds have converged to within either the absolute or relative tolerance. This method of checking convergence @@ -157,7 +158,7 @@ end If `_postprocess_feasibility` is not set to `true`, no nodes will be branched on. -## 7) Termination check +## 7) Termination Check This is the check that occurs on each iteration of the branch-and-bound algorithm that determines whether the algorithm continues or not. By default, several @@ -189,8 +190,7 @@ function EAGO.termination_check(t::MyNewStruct, x::EAGO.GlobalOptimizer) end ``` +## References - -## References: 1. Kelley, J. E. “The Cutting-Plane Method for Solving Convex Programs.” *Journal of the Society for Industrial and Applied Mathematics*, vol. 8, no. 4, pp. 703–12 (1960). -2. Tawarmalani, M., Sahinidis, N. V. "Global optimization of mixed-integer nonlinear programs: A theoretical and computational study." *Math. Program., Ser. A*, 99, pp. 563-591 (2004). \ No newline at end of file +2. Tawarmalani, M., Sahinidis, N. V. "Global optimization of mixed-integer nonlinear programs: A theoretical and computational study." *Math. Program., Ser. A*, 99, pp. 563-591 (2004). diff --git a/docs/src/Quick_Start/medium.md b/docs/src/quick_start/medium.md similarity index 81% rename from docs/src/Quick_Start/medium.md rename to docs/src/quick_start/medium.md index 6641a183..5e541ec2 100644 --- a/docs/src/Quick_Start/medium.md +++ b/docs/src/quick_start/medium.md @@ -1,9 +1,9 @@ -# Simple Example +# Medium-Difficulty Example -(This example is also provided [here as a Jupyter Notebook](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/custom_quasiconvex.ipynb)) +This example is also provided [here as a Jupyter Notebook](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/custom_quasiconvex.ipynb) In this example, we'll adapt EAGO to implement the bisection-based algorithm used to solve -the quasiconvex optimization problem presented in [1]: +a quasiconvex optimization problem12. ## EAGO Implementation - - In the first block, we input parameters values supplied in the paper for $W_1$, $W_2$, $B_1$, and $B_2$ into Julia as simple array objects. We also input bounds for the variables which are used to scale the values obtained from optimization from [-1, 1] back into the @@ -55,11 +52,11 @@ xLBD = [0.623, 0.093, 0.259, 6.56, 1114, 0.013, 0.127, 0.004] xUBD = [5.89, 0.5, 1.0, 90, 25000, 0.149, 0.889, 0.049]; ``` -## Construct the JuMP model and optimize +## Construct the JuMP Model and Optimize -We now formulate the problem using standard JuMP[3] syntax and optimize it. Note that +We now formulate the problem using standard JuMP3 syntax and optimize it. 
Note that we are forming an NLexpression object to handle the summation term to keep the code -visually simple, but this could be placed directly in the JuMP @NLobjective expression +visually simple, but this could be placed directly in the JuMP `@NLobjective` expression instead. ```julia @@ -73,7 +70,7 @@ model = Model(optimizer_with_attributes(EAGO.Optimizer, "absolute_tolerance" => optimize!(model) ``` -## Retrieve results +## Retrieve Results We then recover the objective value, the solution value, and termination status codes using standard JuMP syntax. The optimal value and solution values are then rescaled @@ -98,6 +95,8 @@ println("The rescaled optimal value is: $(round(rescaled_fval,digits=4))") println("The rescaled solution is $(round.(rescaled_xsol,digits=3)).") ``` -## Reference: +## References + 1. C. Jansson, Quasiconvex relaxations based on interval arithmetic, Linear Algebra and its Applications, 324 (2001), pp. 27–53. -2. S. Boyd and L. Vandenberghe, Convex optimization, Cambridge University Press, 2004. \ No newline at end of file +2. S. Boyd and L. Vandenberghe, Convex optimization, Cambridge University Press, 2004. +3. Iain Dunning and Joey Huchette and Miles Lubin. JuMP: A Modeling Language for Mathematical Optimization, *SIAM Review*, 59 (2017), pp. 295-320. diff --git a/docs/src/Quick_Start/qc_Equation_1.png b/docs/src/quick_start/qc_Equation_1.png similarity index 100% rename from docs/src/Quick_Start/qc_Equation_1.png rename to docs/src/quick_start/qc_Equation_1.png diff --git a/docs/src/Quick_Start/qc_Equation_2.png b/docs/src/quick_start/qc_Equation_2.png similarity index 100% rename from docs/src/Quick_Start/qc_Equation_2.png rename to docs/src/quick_start/qc_Equation_2.png diff --git a/docs/src/Quick_Start/qc_Equation_3.png b/docs/src/quick_start/qc_Equation_3.png similarity index 100% rename from docs/src/Quick_Start/qc_Equation_3.png rename to docs/src/quick_start/qc_Equation_3.png diff --git a/docs/src/Quick_Start/qs_landing.md b/docs/src/quick_start/qs_landing.md similarity index 52% rename from docs/src/Quick_Start/qs_landing.md rename to docs/src/quick_start/qs_landing.md index 09fb78eb..d0fca0d3 100644 --- a/docs/src/Quick_Start/qs_landing.md +++ b/docs/src/quick_start/qs_landing.md @@ -10,7 +10,7 @@ EAGO is designed to be easily extensible. Some of the examples that follow inclu cases where the standard EAGO functionality is overloaded and readily incorporated into the main optimization routine. Information on how to extend the main branch-and-bound functions (including lower and upper bounding routines) can be found in the -**Customization Guidelines** section. +[Customization Guidelines](https://psorlab.github.io/EAGO.jl/dev/quick_start/guidelines/) section. ## Examples @@ -18,7 +18,6 @@ The following pages in this section include several representative examples of h can be used. Additional (and in some cases, shortened) examples can be found in the [EAGO-notebooks repository](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks). Examples and instructional pages in this section include: -* **Simple Example**: A base-case optimization problem solved using the EAGO optimizer. No extensions or function overloading required. -* **Medium-Difficulty Example** A quasiconvex optimization problem solved by overloading some of EAGO's functionality to implement a bisection-based algorithm instead of typical branch-and-bound. 
(TODO, but see the [Jupyter Notebook version](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/custom_quasiconvex.ipynb)) -* **Difficult/Developer Example** Overloading the branch-and-bound algorithm with a custom extension type. (TODO) - +- [Simple Example](https://psorlab.github.io/EAGO.jl/dev/quick_start/simple/): A base-case optimization problem solved using the EAGO optimizer. No extensions or function overloading required. +- [Medium-Difficulty Example](https://psorlab.github.io/EAGO.jl/dev/quick_start/medium/): A quasiconvex optimization problem solved by overloading some of EAGO's functionality to implement a bisection-based algorithm instead of typical branch-and-bound. (TODO, but see the [Jupyter Notebook version](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/custom_quasiconvex.ipynb)) +- [Difficult/Developer Example](https://psorlab.github.io/EAGO.jl/dev/quick_start/difficult/): Overloading the branch-and-bound algorithm with a custom extension type. (TODO) diff --git a/docs/src/Quick_Start/starting.md b/docs/src/quick_start/simple.md similarity index 83% rename from docs/src/Quick_Start/starting.md rename to docs/src/quick_start/simple.md index 4cc05414..cac53609 100644 --- a/docs/src/Quick_Start/starting.md +++ b/docs/src/quick_start/simple.md @@ -1,14 +1,14 @@ # Simple Example -(This example is also provided [here as a Jupyter Notebook](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/nlpopt_explicit_ann.ipynb)) +This example is also provided [here as a Jupyter Notebook](https://github.com/PSORLab/EAGO-notebooks/blob/master/notebooks/nlpopt_explicit_ann.ipynb) -In [1,2], a surrogate ANN model of bioreactor productivity was constructed by fitting -results from computationally expensive CFD simulations. The author then optimized this +A surrogate ANN model of bioreactor productivity was constructed by fitting +results from computationally expensive CFD simulations1,2. The authors then optimized this surrogate model to obtain ideal processing conditions. The optimization problem is given by: ![Equation 1](Equation_1.png) -## Input parameters +## Input Parameters In the first block, we input parameters values supplied in the paper for $W_1$, $W_2$, $B_1$, and $B_2$ into Julia as simple array objects. We also input bounds for the variables @@ -37,11 +37,11 @@ xLBD = [0.623, 0.093, 0.259, 6.56, 1114, 0.013, 0.127, 0.004] xUBD = [5.89, 0.5, 1.0, 90, 25000, 0.149, 0.889, 0.049]; ``` -## Construct the JuMP model and optimize +## Construct the JuMP Model and Optimize -We now formulate the problem using standard JuMP[3] syntax and optimize it. Note that +We now formulate the problem using standard JuMP3 syntax and optimize it. Note that we are forming an NLexpression object to handle the summation term to keep the code -visually simple, but this could be placed directly in the JuMP @NLobjective expression +visually simple, but this could be placed directly in the JuMP `@NLobjective` expression instead. ```julia @@ -55,7 +55,7 @@ model = Model(optimizer_with_attributes(EAGO.Optimizer, "absolute_tolerance" => optimize!(model) ``` -## Retrieve results +## Retrieve Results We then recover the objective value, the solution value, and termination status codes using standard JuMP syntax. 
The optimal value and solution values are then rescaled @@ -80,7 +80,8 @@ println("The rescaled optimal value is: $(round(rescaled_fval,digits=4))") println("The rescaled solution is $(round.(rescaled_xsol,digits=3)).") ``` -## Reference: +## References + 1. J. D. Smith, A. A. Neto, S. Cremaschi, and D. W. Crunkleton, CFD-based optimization of a flooded bed algae bioreactor, *Industrial & Engineering Chemistry Research*, 52 (2012), pp. 7181–7188. 2. A. M. Schweidtmann and A. Mitsos. Global Deterministic Optimization with Artificial Neural Networks Embedded [https://arxiv.org/pdf/1801.07114.pdf](https://arxiv.org/pdf/1801.07114.pdf). 3. Iain Dunning and Joey Huchette and Miles Lubin. JuMP: A Modeling Language for Mathematical Optimization, *SIAM Review*, 59 (2017), pp. 295-320. diff --git a/docs/src/ref.md b/docs/src/ref.md index a4c50721..7adea66b 100644 --- a/docs/src/ref.md +++ b/docs/src/ref.md @@ -1,10 +1,12 @@ -# **References** +# References ## *Branch and Bound* + - **Floudas, CA (2013)**. Deterministic global optimization: theory, methods and applications. Vol. 37. *Springer Science & Business Media*. - **Horst, R, Tuy, H (2013)**. Global optimization: Deterministic approaches. *Springer Science & Business Media*. ## *Parametric Interval Techniques* + - **Hansen ER, Walster GW (2004)**. Global Optimization Using Interval Analysis. *Marcel Dekker, New York, second edition*. - **Krawczyk R (1969)**. Newton-algorithmen zur bestimmung con nullstellen mit fehler-schranken. *Computing*, 4:187–201. - **Krawczyk R (1984)**. Interval iterations for including a set of solutions. *Computing*, 32:13–31. @@ -13,6 +15,7 @@ - **Moore RE (1977)**. A test for existence of solutions to nonlinear systems. *SIAM Journal on Numerical Analysis*, 14(4):611–615. ## *Domain Reduction* + - **Benhamou F, & Older WJ (1997)**. Applying interval arithmetic to real, integer, and boolean constraints. *The Journal of Logic Programming*, 32, 1–24. - **Caprara A, & Locatelli M (2010)**. Global optimization problems and domain reduction strategies. *Mathematical Programming*, 125, 123–137. - **Gleixner AM, Berthold T, Müller B, & Weltge S (2016)**. Three enhancements for optimization-based bound tightening. *ZIB Report*, 15–16. @@ -22,6 +25,7 @@ - **Vu, X, Schichl, H, & Sam-Haroud, D (2009)**. Interval propagation and search on directed acyclic graphs for numerical constraint solving. *Journal of Global Optimization*, 45, 499–531. ## *Generalized McCormick Relaxations* + - **Chachuat, B (2014).** MC++: a toolkit for bounding factorable functions, v1.0. Retrieved 2 July 2014 https://projects.coin-or.org/MCpp - **Mitsos A, Chachuat B, and Barton PI. (2009).** McCormick-based relaxations of algorithms. *SIAM Journal on Optimization*, 20(2):573–601. - **McCormick, GP (1976).**. Computability of global solutions to factorable nonconvex programs: Part I-Convex underestimating problems. *Mathematical Programming*, 10:147–175. @@ -33,5 +37,6 @@ - **Wechsung A, Scott JK, Watson HAJ, and Barton PI. (2015).** Reverse propagation of McCormick relaxations. *Journal of Global Optimization* 63(1):1-36. ## *Semi-Infinite Programming* + - **Mitsos A (2009).** Global optimization of semi-infinite programs via restriction of the right-hand side. *Optimization*, 60(10-11):1291-1308. - **Stuber MD and Barton PI (2015).** Semi-Infinite Optimization With Implicit Functions. *Industrial & Engineering Chemistry Research*, 54:307-317, 2015. 
diff --git a/docs/src/SemiInfinite/SIPProbFormulation.png b/docs/src/semiinfinite/SIPProbFormulation.png similarity index 100% rename from docs/src/SemiInfinite/SIPProbFormulation.png rename to docs/src/semiinfinite/SIPProbFormulation.png diff --git a/docs/src/SemiInfinite/SIPformulation.png b/docs/src/semiinfinite/SIPformulation.png similarity index 100% rename from docs/src/SemiInfinite/SIPformulation.png rename to docs/src/semiinfinite/SIPformulation.png diff --git a/docs/src/SemiInfinite/semiinfinite.md b/docs/src/semiinfinite/semiinfinite.md similarity index 65% rename from docs/src/SemiInfinite/semiinfinite.md rename to docs/src/semiinfinite/semiinfinite.md index 32ec54db..11d858d7 100644 --- a/docs/src/SemiInfinite/semiinfinite.md +++ b/docs/src/semiinfinite/semiinfinite.md @@ -1,15 +1,12 @@ # Solving Semi-Infinite Programming -[Matthew Wilhelm](https://psor.uconn.edu/person/matthew-wilhelm/) -Department of Chemical and Biomolecular Engineering, University of Connecticut +## Using EAGO to Solve a Semi-Infinite Program (SIP) -## Using EAGO to solve a SIP - -Semi-infinite programming remains an active area of research. In general, the solution of semi-infinite programs with nonconvex semi-infinite constraints of the below form are extremely challenging: +Semi-infinite programming remains an active area of research. In general, the solutions of SIPs with nonconvex semi-infinite constraints of the following form are extremely challenging: ![SipProbForm](SIPProbFormulation.png) -EAGO implements three different algorithm detailed in [1,2] to determine a globally optimal solution to problems of the above form. This accomplished using the `sip_solve` function which returns the optimal value, the solution, and a boolean feasibility value. To illustrate the functions use, a simple example is presented here which solves the below problem: +EAGO implements three different algorithms1,2 to determine a globally optimal solution to these problems. This is accomplished using the `sip_solve` function which returns the optimal value, the solution, and a boolean feasibility flag. To illustrate the use of this function, a simple example is presented here which solves the problem: ![SipForm](SIPformulation.png) @@ -32,7 +29,8 @@ println("The global minimum is attained at: x = $(sip_result.xsol).") println("Is the problem feasible? $(sip_result.feasibility).") ``` -## Semi-infinite solver +## Semi-Infinite Solver + ```@docs SIPProblem SIPResult @@ -47,5 +45,7 @@ println("Is the problem feasible? $(sip_result.feasibility).") sip_solve ``` +## References + 1. **Mitsos A (2009).** Global optimization of semi-infinite programs via restriction of the right-hand side. *Optimization*, 60(10-11):1291-1308. 2. **Djelassi, Hatim, and Alexander Mitsos.** A hybrid discretization algorithm with guaranteed feasibility for the global solution of semi-infinite programs. *Journal of Global Optimization*, 68.2 (2017): 227-253 should be used. diff --git a/examples/BeckerLago.jl b/examples/BeckerLago.jl index ff162e32..08b7b8d7 100644 --- a/examples/BeckerLago.jl +++ b/examples/BeckerLago.jl @@ -19,4 +19,4 @@ set_upper_bound(x[2], 10.0) # ----- Objective ----- # @objective(m, Min, objvar) -m = m # model get returned when including this script. 
+m = m # Model gets returned when including this script diff --git a/examples/Camel3.jl b/examples/Camel3.jl index 25200ba1..c3dc0a90 100644 --- a/examples/Camel3.jl +++ b/examples/Camel3.jl @@ -8,7 +8,7 @@ set_optimizer_attribute(m, "iteration_limit", 1000) set_optimizer_attribute(m, "cut_max_iterations", 2) set_optimizer_attribute(m, "subgrad_tighten", false) -# OBBT depth 0 -> 20... increases number of iterations... +# OBBT depth 0 -> 20, increases number of iterations set_optimizer_attribute(m, "obbt_depth", 8) set_optimizer_attribute(m, "obbt_repetitions", 2) diff --git a/examples/ex6_2_11.jl b/examples/ex6_2_11.jl index 4cb2b4b5..c0433350 100644 --- a/examples/ex6_2_11.jl +++ b/examples/ex6_2_11.jl @@ -10,7 +10,7 @@ set_optimizer_attribute(m, "output_iterations", 1000) set_optimizer_attribute(m, "iteration_limit", 10000000) set_optimizer_attribute(m, "cut_max_iterations", 2) -# OBBT depth 0 -> 20... increases number of iterations... +# OBBT depth 0 -> 20, increases number of iterations set_optimizer_attribute(m, "obbt_depth", 8) set_optimizer_attribute(m, "obbt_repetitions", 2) @@ -40,7 +40,7 @@ s = time() @NLobjective(m, Min, x[2]*x[3]*x[4] + x[2]*x[4]) @constraint(m, e2, x[2]+x[3]+x[4] == 1.0) -m = m # model get returned when including this script. +m = m # Model gets returned when including this script optimize!(m) @show termination_status(m) diff --git a/src/EAGO.jl b/src/EAGO.jl index 0730b9dc..951e9a86 100644 --- a/src/EAGO.jl +++ b/src/EAGO.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/EAGO.jl # The main file for EAGO. -############################################################################# +################################################################################ __precompile__() @@ -83,11 +84,11 @@ module EAGO export register_eago_operators! 
- # map/reduce nonallocating no bounds checking map-reduce like utilities + # Map/reduce nonallocating no bounds checking map-reduce like utilities include(joinpath(@__DIR__, "eago_optimizer", "debug_tools.jl")) include(joinpath(@__DIR__, "eago_optimizer", "utilities.jl")) - # creates a context that removes domain violations when constructing bounds + # Creates a context that removes domain violations when constructing bounds #include("eago_optimizer/guarded_context.jl") include(joinpath(@__DIR__, "eago_optimizer", "types", "log.jl")) @@ -97,15 +98,15 @@ module EAGO include(joinpath(@__DIR__, "eago_optimizer", "types", "incremental.jl")) include(joinpath(@__DIR__, "eago_optimizer", "types", "subsolver_block.jl")) - # load internal storage functions + # Loads internal storage functions include(joinpath(@__DIR__, "eago_optimizer", "functions", "functions.jl")) include(joinpath(@__DIR__, "eago_optimizer", "types", "global_optimizer.jl")) - # defines the optimizer structures + # Defines the optimizer structures include(joinpath(@__DIR__, "eago_optimizer", "optimizer.jl")) - # defines routines to add variable, saf, sqf, and nlp block constraints + # Defines routines to add variable, saf, sqf, and nlp block constraints include(joinpath(@__DIR__, "eago_optimizer", "moi_wrapper.jl")) # @@ -121,10 +122,10 @@ module EAGO # include(joinpath(@__DIR__, "eago_optimizer", "optimize", "optimize.jl")) - # import the script solving utilities + # Imports the script solving utilities include(joinpath(@__DIR__, "eago_script", "script.jl")) - # routines for solving SIPs + # Routines for solving SIPs export SIPResult, SIPProblem, SIPCallback, SIPSubResult, sip_solve, SIPRes, SIPResRev, SIPHybrid, build_model, set_tolerance_inner!, set_tolerance!, get_disc_set, diff --git a/src/eago_optimizer/debug_tools.jl b/src/eago_optimizer/debug_tools.jl index c2872557..e64ae228 100644 --- a/src/eago_optimizer/debug_tools.jl +++ b/src/eago_optimizer/debug_tools.jl @@ -1,7 +1,7 @@ """ print_problem_summary -Internal script used to display all constraints, objectives in a linear program. +Internal script used to display all constraints and objectives in a linear program. Added to functions for debug purposes while writing code. """ diff --git a/src/eago_optimizer/domain_reduction.jl b/src/eago_optimizer/domain_reduction.jl index ea7008cb..b81eec26 100644 --- a/src/eago_optimizer/domain_reduction.jl +++ b/src/eago_optimizer/domain_reduction.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_optimizer/domain_reduction.jl # Contains subroutines used for domain reduction. 
-############################################################################# +################################################################################ """ $(TYPEDSIGNATURES) @@ -131,7 +132,7 @@ function aggressive_filtering!(m::GlobalOptimizer{R,S,Q}, n::NodeBB) where {R,S, end end - # Termination Condition + # Termination condition ((~any(m._new_low_index) & ~any(m._new_upp_index)) || (iszero(v))) && break if k >= 2 if (count(m._lower_indx_diff) + count(m._upper_indx_diff)) < m._parameters.obbt_aggressive_min_dimension @@ -226,7 +227,7 @@ end function Δxl(m, i) _lower_solution(BranchVar(),m,i) - _lower_bound(BranchVar(),m,i) end -function Δux(m, i) +function Δxu(m, i) _upper_bound(BranchVar(),m,i) - _lower_solution(BranchVar(),m,i) end @@ -275,15 +276,15 @@ function obbt!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} return false end - # continue tightening bounds by optimization until all indices have been checked + # Continue tightening bounds by optimization until all indices have been checked # or the node is empty and the problem is thus proven infeasible while (any(m._obbt_working_lower_index) || any(m._obbt_working_upper_index)) && !isempty(n) - # min of xLP - yL and xU - xLP for potential directions + # Determine min of xLP - yL and xU - xLP for potential directions lower_indx, lower_value = active_argmin(i -> Δxl(m, i), m._obbt_working_lower_index, obbt_variable_count) - upper_indx, upper_value = active_argmin(i -> Δux(m, i), m._obbt_working_upper_index, obbt_variable_count) + upper_indx, upper_value = active_argmin(i -> Δxu(m, i), m._obbt_working_upper_index, obbt_variable_count) - # default to upper bound if no lower bound is found, use maximum distance otherwise + # Default to upper bound if no lower bound is found, use maximum distance otherwise if lower_value <= upper_value && lower_indx > 0 m._obbt_working_lower_index[lower_indx] = false MOI.set(d, MOI.ObjectiveSense(), MOI.MIN_SENSE) @@ -295,9 +296,9 @@ function obbt!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} updated_value = MOI.get(d, MOI.ObjectiveValue()) # xLP[_bvi(m, lower_indx)] previous_value = n.lower_variable_bounds[lower_indx] - # if bound is improved update node and corresponding constraint update - # the node bounds and the single variable bound in the relaxation - # we assume branching does not occur on fixed variables and interval + # If bound is improved, update node and corresponding constraint. Update + # the node bounds and the single variable bound in the relaxation. + # We assume branching does not occur on fixed variables and interval # constraints are internally bridged by EAGO. So the only L <= x # constraint in the model is a GreaterThan. if updated_value > previous_value && (updated_value - previous_value) > 1E-6 @@ -331,9 +332,9 @@ function obbt!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} updated_value = MOI.get(d, MOI.ObjectiveValue()) # xLP[_bvi(m, upper_indx)] previous_value = n.upper_variable_bounds[upper_indx] - # if bound is improved update node and corresponding constraint update - # the node bounds and the single variable bound in the relaxation - # we assume branching does not occur on fixed variables and interval + # If bound is improved, update node and corresponding constraint. Update + # the node bounds and the single variable bound in the relaxation. + # We assume branching does not occur on fixed variables and interval # constraints are internally bridged by EAGO. So the only U => x # constraint in the model is a LessThan. 
if updated_value < previous_value && (previous_value - updated_value) > 1E-6 @@ -415,7 +416,7 @@ function fbbt! end function fbbt!(m::GlobalOptimizer, f::AffineFunctionIneq) - # compute full sum + # Compute full sum lower_bounds = m._lower_fbbt_buffer upper_bounds = m._upper_fbbt_buffer terms = f.terms @@ -429,8 +430,8 @@ function fbbt!(m::GlobalOptimizer, f::AffineFunctionIneq) end end - # subtract extra term, check to see if implied bound is better, if so update the node and - # the working sum if the node is now empty then break + # Subtract extra term and check to see if implied bound is better. If so, + # update the node and the working sum. If the node is now empty, then break. for k = 1:f.len aik, i = @inbounds terms[k] if !iszero(aik) @@ -459,7 +460,7 @@ function fbbt!(m::GlobalOptimizer, f::AffineFunctionIneq) end function fbbt!(m::GlobalOptimizer, f::AffineFunctionEq) - # compute full sum + # Compute full sum lower_bounds = m._lower_fbbt_buffer upper_bounds = m._upper_fbbt_buffer terms = f.terms @@ -476,8 +477,8 @@ function fbbt!(m::GlobalOptimizer, f::AffineFunctionEq) end end - # subtract extra term, check to see if implied bound is better, if so update the node and - # the working sum if the node is now empty then break + # Subtract extra term and check to see if implied bound is better. If so, + # update the node and the working sum. If the node is now empty, then break. for k = 1:f.len aik, i = @inbounds terms[k] if !iszero(aik) diff --git a/src/eago_optimizer/functions/functions.jl b/src/eago_optimizer/functions/functions.jl index bb359998..98cb6be1 100644 --- a/src/eago_optimizer/functions/functions.jl +++ b/src/eago_optimizer/functions/functions.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/functions/nonlinear/functions.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/functions/functions.jl # Defines variable info and function types. 
-############################################################################# +################################################################################ include(joinpath(@__DIR__, "nonlinear","auxiliary_variables.jl")) diff --git a/src/eago_optimizer/functions/nonlinear/apriori_relax/affine_arithmetic.jl b/src/eago_optimizer/functions/nonlinear/apriori_relax/affine_arithmetic.jl index 238c5cd0..a182342a 100644 --- a/src/eago_optimizer/functions/nonlinear/apriori_relax/affine_arithmetic.jl +++ b/src/eago_optimizer/functions/nonlinear/apriori_relax/affine_arithmetic.jl @@ -2,9 +2,9 @@ const USE_MIN_RANGE = true struct AffineEAGO{N} - c::Float64 # mid-point - γ::SVector{N,Float64} # affine terms - Δ::Float64 # error term + c::Float64 # Mid-point + γ::SVector{N,Float64} # Affine terms + Δ::Float64 # Error term end function AffineEAGO(x::AffineEAGO{N}, p::Float64, q::Float64, δ::Float64) where N diff --git a/src/eago_optimizer/functions/nonlinear/auxiliary_variables.jl b/src/eago_optimizer/functions/nonlinear/auxiliary_variables.jl index afdbe49f..b255742f 100644 --- a/src/eago_optimizer/functions/nonlinear/auxiliary_variables.jl +++ b/src/eago_optimizer/functions/nonlinear/auxiliary_variables.jl @@ -5,9 +5,7 @@ function _not_EAGO_error!(m::JuMP.Model) end end -#= -Reference for auxiliary variables -=# +# Reference for auxiliary variables struct AuxiliaryVariableRef <: JuMP.AbstractVariableRef idx::Int model::JuMP.Model diff --git a/src/eago_optimizer/functions/nonlinear/composite_relax/forward_propagation.jl b/src/eago_optimizer/functions/nonlinear/composite_relax/forward_propagation.jl index b1a0a1c7..68569c47 100644 --- a/src/eago_optimizer/functions/nonlinear/composite_relax/forward_propagation.jl +++ b/src/eago_optimizer/functions/nonlinear/composite_relax/forward_propagation.jl @@ -1,14 +1,15 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/evaluator/passes.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/functions/nonlinear/composite_relax/forward_propagation.jl # Functions used to compute forward pass of nonlinear functions which include: -# set_value_post, overwrite_or_intersect, forward_pass_kernel, associated blocks -############################################################################# +# varset, fprop!, fprop_2!, fprop_n!, f_init! 
+################################################################################

 xnum_yset(b, x, y) = is_num(b, x) && !is_num(b, y)
 xset_ynum(b, x, y) = !is_num(b, x) && is_num(b, y)
@@ -217,12 +218,13 @@ function fprop_n!(t::Relax, ::Val{MULT}, g::DAT, b::RelaxCache{V,N,T}, k::Int) w
     zv = z*x
     wIntv = zv.Intv
     if (u1max < z.Intv.hi) || (u2max < x.Intv.hi)
-        u1cv, u2cv, u1cvg, u2cvg = estimator_under(zr, xr, s, dp, dP)
+        u1cv, u2cv, u1cvg, u2cvg = estimator_under(0, 0, zr, xr, s, dp, dP, 0, 0)
+        #u1cv, u2cv, u1cvg, u2cvg = estimator_under(xv, yv, xr, yr, s, dp, dP, p_rel, p_diam)
         za_l = McCormick.mult_apriori_kernel(z, x, wIntv, u1cv, u2cv, u1max, u2max, u1cvg, u2cvg)
         zv = zv ∩ za_l
     end
     if (v1nmax > -z.Intv.lo) || (v2nmax > -x.Intv.lo)
-        v1ccn, v2ccn, v1ccgn, v2ccgn = estimator_under(zr, xr, s, dp, dP)
+        v1ccn, v2ccn, v1ccgn, v2ccgn = estimator_under(0, 0, zr, xr, s, dp, dP, 0, 0)
         za_u = McCormick.mult_apriori_kernel(-z, -x, wIntv, v1ccn, v2ccn, v1nmax, v2nmax, v1ccgn, v2ccgn)
         zv = zv ∩ za_u
     end
diff --git a/src/eago_optimizer/functions/nonlinear/composite_relax/reverse_propagation.jl b/src/eago_optimizer/functions/nonlinear/composite_relax/reverse_propagation.jl
index 06e93069..42f181f0 100644
--- a/src/eago_optimizer/functions/nonlinear/composite_relax/reverse_propagation.jl
+++ b/src/eago_optimizer/functions/nonlinear/composite_relax/reverse_propagation.jl
@@ -1,13 +1,15 @@
-# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber.
-# This code is licensed under MIT license (see LICENSE.md for full details)
-#############################################################################
+# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston,
+# Matthew Stuber, and the University of Connecticut (UConn).
+# This code is licensed under the MIT license (see LICENSE.md for full details).
+################################################################################
 # EAGO
-# A development environment for robust and global optimization
-# See https://github.com/PSORLab/EAGO.jl
-#############################################################################
-# src/eago_optimizer/functions/nonlinear/reverse_pass.jl
-# Functions used to compute reverse pass of nonlinear functions.
-#############################################################################
+# A development environment for robust and global optimization.
+# https://github.com/PSORLab/EAGO.jl
+################################################################################
+# src/eago_optimizer/functions/nonlinear/composite_relax/reverse_propagation.jl
+# Functions used to compute reverse pass of nonlinear functions which include:
+# r_init!, rprop!, rprop_2!, rprop_n!
+################################################################################

 function r_init!(t::Relax, g::DAT, b::RelaxCache{V,N,T}) where {V,N,T<:RelaxTag}
     if !is_num(b, 1)
@@ -37,7 +39,7 @@ const MAX_ASSOCIATIVE_REVERSE = 6
 """
 $(FUNCTIONNAME)

-Updates storage tapes with reverse evalution of node representing `n = x + y` which updates x & y.
+Updates storage tapes with reverse evaluation of node representing `n = x + y` which updates x and y.
 """
 function rprop_2!(t::Relax, v::Val{PLUS}, g::DAT, c::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag}
     is_num(c, k) && return true
@@ -67,12 +69,12 @@ $(FUNCTIONNAME)
 Updates storage tapes with reverse evaluation of node representing `n = +(x,y,z...)` which updates x, y, z and so on.
 """
 function rprop_n!(t::Relax, v::Val{PLUS}, g::DAT, c::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag}
-    # out loops makes a temporary sum (minus one argument)
-    # a reverse is then compute with respect to this argument
+    # Outer loop makes a temporary sum (minus one argument)
+    # A reverse is then computed with respect to this argument
     count = 0
     children_idx = children(g, k)
     for i in children_idx
-        is_num(c, i) && continue # don't contract a number valued argument
+        is_num(c, i) && continue # Don't contract a number valued argument
         (count >= MAX_ASSOCIATIVE_REVERSE) && break
         tsum = zero(MC{N,T})
         count += 1
@@ -95,7 +97,7 @@ end

 """
 $(FUNCTIONNAME)

-Updates storage tapes with reverse evalution of node representing `n = x * y` which updates x & y.
+Updates storage tapes with reverse evaluation of node representing `n = x * y` which updates x and y.
 """
 function rprop_2!(t::Relax, v::Val{MULT}, g::DAT, c::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag}
@@ -128,11 +130,12 @@ $(FUNCTIONNAME)
 Updates storage tapes with reverse evaluation of node representing `n = *(x,y,z...)` which updates x, y, z and so on.
 """
 function rprop_n!(t::Relax, v::Val{MULT}, g::DAT, c::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag}
-    # a reverse is then compute with respect to this argument
+    # Outer loop makes a temporary sum (minus one argument)
+    # A reverse is then computed with respect to this argument
     count = 0
     children_idx = children(g, k)
     for i in children_idx
-        is_num(b, i) && continue # don't contract a number valued argument
+        is_num(c, i) && continue # Don't contract a number valued argument
         (count >= MAX_ASSOCIATIVE_REVERSE) && break
         tmul = one(MC{N,T})
         count += 1
diff --git a/src/eago_optimizer/functions/nonlinear/graph/expressions.jl b/src/eago_optimizer/functions/nonlinear/graph/expressions.jl
index 3b72e236..659d756a 100644
--- a/src/eago_optimizer/functions/nonlinear/graph/expressions.jl
+++ b/src/eago_optimizer/functions/nonlinear/graph/expressions.jl
@@ -1,6 +1,6 @@
 # Definitions borrow from https://github.com/FluxML/NNlib.jl (names used to
-# standardize package). TODO: Decide how/if to incorporate NNlib depedency into
-# McCormick.jl/EAGO.jl.
+# standardize package).
+# TODO: Decide how/if to incorporate NNlib dependency into McCormick.jl/EAGO.jl.
 oftf(x, y) = oftype(float(x), y)
 leakyrelu(x, a=oftf(x, 0.01)) = max(a * x, x)
 swish1(x) = x * sigmoid(x)
@@ -126,7 +126,7 @@ foreach(x -> setindex!(ALL_ATOM_DICT, x[2], x[1]), UNIVARIATE_ATOM_DICT)
 foreach(x -> setindex!(ALL_ATOM_DICT, x[2], x[1]), BIVARIATE_ATOM_DICT)
 foreach(x -> setindex!(ALL_ATOM_DICT, x[2], x[1]), NARITY_ATOM_DICT)

-# A functions that may be 1 to n-arity functions that correspond to the AtomType
+# Functions that may have arity 1 to n and correspond to the AtomType
 ALL_ATOM_DICT[BND] = :bnd
 #ATM_EVAL[QUAD1] = :quad1
 #ATM_EVAL[QUAD2] = :quad2
diff --git a/src/eago_optimizer/functions/nonlinear/graph/graph.jl b/src/eago_optimizer/functions/nonlinear/graph/graph.jl
index 2a224fe4..251fd5b4 100644
--- a/src/eago_optimizer/functions/nonlinear/graph/graph.jl
+++ b/src/eago_optimizer/functions/nonlinear/graph/graph.jl
@@ -1,6 +1,4 @@
-#=
-TODO: Each graph representation is assumed to be static... so
-=#
+# TODO: Each graph representation is assumed to be static...
so """ $(TYPEDEF) @@ -20,7 +18,7 @@ function _variable_count(g::AbstractDG)::Int error("Variable count not defined for graph type = $(typeof(g))") end -# added id field to MOI OperatorRegistry +# Added id field to MOI OperatorRegistry struct OperatorRegistry univariate_operators::Vector{Symbol} univariate_operator_id::Vector{Symbol} diff --git a/src/eago_optimizer/functions/nonlinear/graph/graphs/directed_tree.jl b/src/eago_optimizer/functions/nonlinear/graph/graphs/directed_tree.jl index 0074c315..13dc8ac7 100644 --- a/src/eago_optimizer/functions/nonlinear/graph/graphs/directed_tree.jl +++ b/src/eago_optimizer/functions/nonlinear/graph/graphs/directed_tree.jl @@ -9,7 +9,7 @@ abstract type AbstractCache end $(TYPEDSIGNATURES) Function used to initialize the storage cache `d::AbstractCache` -for a given type of directed acycle graph `g::AbstractDirectedGraph`. +for a given type of directed acyclic graph `g::AbstractDirectedGraph`. """ function initialize!(::AbstractCache, ::AbstractDirectedGraph) error("Initialization method not defined for this combination @@ -164,7 +164,7 @@ Base.@kwdef mutable struct DirectedTree <: AbstractDirectedAcyclicGraph nodes::Vector{Node} = Node[] "List of index of variables in this tree" variables::Dict{Int,Int} = Dict{Int,Int}() - "Information on all variables..." + "Information on all variables" v::VariableValues{Float64} = VariableValues{Float64}() "List of constant values" constant_values::Vector{Float64} = Float64[] @@ -193,7 +193,7 @@ end #DirectedTree(n::Int) = DirectedTree(children = spzeros(Bool,n,n), parents=spzeros(Bool,n,n)) const DAT = DirectedTree -# node property access functions that can be defined at abstract type +# Node property access functions that can be defined at abstract type node(g::DAT, i) = g.nodes[i] nodes(g::DAT) = g.nodes diff --git a/src/eago_optimizer/functions/nonlinear/graph/utilities.jl b/src/eago_optimizer/functions/nonlinear/graph/utilities.jl index f9708e7e..21302ab1 100644 --- a/src/eago_optimizer/functions/nonlinear/graph/utilities.jl +++ b/src/eago_optimizer/functions/nonlinear/graph/utilities.jl @@ -43,7 +43,6 @@ end # Access gradient sparsity of MOI storage. sparsity(d::MOIRAD._FunctionStorage) = d.grad_sparsity -#sparsity(d::MOIRAD._SubexpressionStorage) = d.sparsity # Compute gradient sparsity from MOI storage. 
function _compute_sparsity(d::MOIRAD._FunctionStorage, sparse_dict::Dict{Int,Vector{Int}}, is_sub, subexpr_indx) @@ -103,7 +102,7 @@ function linearity(d::MOIRAD.Linearity) (d == MOIRAD.LINEAR) && return LIN_LINEAR (d == MOIRAD.PIECEWISE_LINEAR) && return LIN_PIECEWISE_LINEAR (d == MOIRAD.NONLINEAR) && return LIN_NONLINEAR - LIN_CONSTANT # assumes d is then MOINL.CONSTANT + LIN_CONSTANT # Assumes d is then MOINL.CONSTANT end function linearity(nd::Vector{MOINL.Node}, adj::SparseMatrixCSC{Bool,Int}, d::Vector{MOIRAD.Linearity}) diff --git a/src/eago_optimizer/functions/nonlinear/interval/reverse.jl b/src/eago_optimizer/functions/nonlinear/interval/reverse.jl index 1a74cdc6..be466a2a 100644 --- a/src/eago_optimizer/functions/nonlinear/interval/reverse.jl +++ b/src/eago_optimizer/functions/nonlinear/interval/reverse.jl @@ -15,8 +15,8 @@ function rprop!(t::RelaxInterval, v::Subexpression, g::DAT, c::IntervalCache{T}, return true end -# needed for O(n) reverse interval propagation of + -# returns q for x = q + y +# Needed for O(n) reverse interval propagation of + +# Returns q for x = q + y function hukuhara_diff(x::Interval{T}, y::Interval{T}) where T<:Real isempty(x) && return x isempty(y) && return y @@ -41,8 +41,8 @@ function rprop!(t::RelaxInterval, v::Val{PLUS}, g::DAT, c::IntervalCache{T}, k:: return true end -# needed for close to O(n) reverse interval propagation of * -# returns q for x = q*y +# Needed for close to O(n) reverse interval propagation of * +# Returns q for x = q*y function hukuhara_div(x::Interval{T}, y::Interval{T}) where T<:Real isempty(x) && return x isempty(y) && return y diff --git a/src/eago_optimizer/functions/nonlinear/nonlinear.jl b/src/eago_optimizer/functions/nonlinear/nonlinear.jl index 5a2ec290..91f44756 100644 --- a/src/eago_optimizer/functions/nonlinear/nonlinear.jl +++ b/src/eago_optimizer/functions/nonlinear/nonlinear.jl @@ -1,16 +1,18 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/functions/nonlinear/nonlinear.jl # Defines the NonlinearExpression, BufferedNonlinearFunction used in # constructing relaxations of nonlinear functions along with a number of # helper functions including an Evaluator structure and: set_node_flag! # set_node!, set_reference_point!, retrieve_node, prior_eval # copy_subexpression_value!, eliminate_fixed_variables! 
-############################################################################# +################################################################################ const DEBUG_NL = false @@ -158,7 +160,7 @@ num(d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} = num(d.ex) lower_bound(d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} = d.ex.lower_bound upper_bound(d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} = d.ex.upper_bound -# returns the interval bounds associated with the set +# Returns the interval bounds associated with the set interval(d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} = Interval{Float64}(set(d)) is_num(d::BufferedNonlinearFunction) = is_num(d.ex) @@ -170,7 +172,7 @@ mc_type(rc::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} = MC{N,T} MOI.AbstractNLPEvaluator for calculating relaxations of nonlinear terms. -Checks that the resulting value should be a number... +Checks that the resulting value should be a number. $(TYPEDFIELDS) """ @@ -226,15 +228,13 @@ end end prior_eval(d::Evaluator, i::Int) = d.subexpressions_eval[i] -#= -Assumes the sparsities are sorted... -=# +# Assumes the sparsities are sorted function copy_subexpression_value!(k::Int, op::Int, subexpression::NonlinearExpression{V,MC{N1,T}}, numvalued::Vector{Bool}, numberstorage::Vector{S}, setstorage::Vector{MC{N2,T}}, cv_buffer::Vector{S}, cc_buffer::Vector{S}, func_sparsity::Vector{Int}) where {V, N1, N2, S, T <: RelaxTag} - # fill cv_grad/cc_grad buffers + # Fill cv_grad/cc_grad buffers sub_sparsity = subexpression.grad_sparsity sset = subexpression.setstorage[1] fill!(cv_buffer, zero(S)) diff --git a/src/eago_optimizer/functions/nonlinear/register_special.jl b/src/eago_optimizer/functions/nonlinear/register_special.jl index 1adcfc53..1fb8a146 100644 --- a/src/eago_optimizer/functions/nonlinear/register_special.jl +++ b/src/eago_optimizer/functions/nonlinear/register_special.jl @@ -1,12 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# Defines the a function used to register nonstandard nonlinear terms. -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/functions/nonlinear/register_special.jl +# Defines the function used to register nonstandard nonlinear terms. +################################################################################ """ register_eago_operators! @@ -18,7 +20,7 @@ work can be used by other nonlinear solvers (Ipopt for instance). 
 """
 function register_eago_operators!(m::JuMP.Model)
-    # register activation functions w/o parameters
+    # Register activation functions w/o parameters
     JuMP.register(m, :relu, 1, relu, McCormick.relu_deriv, McCormick.relu_deriv2)
     JuMP.register(m, :leaky_relu, 1, leaky_relu, McCormick.leaky_relu_deriv, McCormick.leaky_relu_deriv2)
     JuMP.register(m, :maxsig, 1, maxsig, McCormick.maxsig_deriv, McCormick.maxsig_deriv2)
@@ -31,14 +33,14 @@ function register_eago_operators!(m::JuMP.Model)
     JuMP.register(m, :gelu, 1, gelu, McCormick.gelu_deriv, McCormick.gelu_deriv2)
     JuMP.register(m, :swish, 1, swish, McCormick.swish_deriv, McCormick.swish_deriv2)
     JuMP.register(m, :xabsx, 1, xabsx, McCormick.xabsx_deriv, McCormick.xabsx_deriv2)
-    JuMP.register(m, :logcosh, 1, xabsx, McCormick.xabsx_deriv, McCormick.xabsx_deriv2)
+    JuMP.register(m, :logcosh, 1, logcosh, McCormick.logcosh_deriv, McCormick.logcosh_deriv2)

-    # register activation functions w/ parameters
+    # Register activation functions w/ parameters
     MOINL.register_operator(m.nlp_model, :param_relu, 2, param_relu, McCormick.param_relu_grad)
     MOINL.register_operator(m.nlp_model, :elu, 2, elu, McCormick.elu_grad)
     MOINL.register_operator(m.nlp_model, :selu, 3, selu, McCormick.selu_grad)

-    # register other functions
+    # Register other functions
     JuMP.register(m, :xlogx, 1, xlogx, McCormick.xlogx_deriv, McCormick.xlogx_deriv2)
     JuMP.register(m, :f_erf, 1, x -> erf(x), McCormick.erf_deriv, McCormick.erf_deriv2)
     JuMP.register(m, :f_erfinv, 1, x -> erfinv(x), McCormick.erfinv_deriv, McCormick.erfinv_deriv2)
@@ -48,7 +50,7 @@ function register_eago_operators!(m::JuMP.Model)
     MOINL.register_operator(m.nlp_model, :arh, 2, arh, McCormick.arh_grad)
     MOINL.register_operator(m.nlp_model, :xexpax, 2, xexpax, McCormick.xexpax_grad)

-    # register bounding functions
+    # Register bounding functions
     MOINL.register_operator(m.nlp_model, :lower_bnd, 2, lower_bnd, McCormick.d_lower_bnd_grad)
     MOINL.register_operator(m.nlp_model, :upper_bnd, 2, upper_bnd, McCormick.d_upper_bnd_grad)
     MOINL.register_operator(m.nlp_model, :bnd, 3, bnd, McCormick.d_bnd_grad)
diff --git a/src/eago_optimizer/functions/nonlinear/user_function/user_function.jl b/src/eago_optimizer/functions/nonlinear/user_function/user_function.jl
index 1f28fd31..0a98279d 100644
--- a/src/eago_optimizer/functions/nonlinear/user_function/user_function.jl
+++ b/src/eago_optimizer/functions/nonlinear/user_function/user_function.jl
@@ -1,5 +1,5 @@
-# Defines the Vrev object which holds a value and an reverse function...
+# Defines the Vrev object which holds a value and a reverse function

 """
     Vrev{T<:Number,F}
diff --git a/src/eago_optimizer/moi_wrapper.jl b/src/eago_optimizer/moi_wrapper.jl
index 9190f4fd..d1178a61 100644
--- a/src/eago_optimizer/moi_wrapper.jl
+++ b/src/eago_optimizer/moi_wrapper.jl
@@ -1,13 +1,14 @@
-# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber.
-# This code is licensed under MIT license (see LICENSE.md for full details)
-#############################################################################
+# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston,
+# Matthew Stuber, and the University of Connecticut (UConn).
+# This code is licensed under the MIT license (see LICENSE.md for full details).
+################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/constraints.jl -# Defines constraints supported by optimizer and how to store them. -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/moi_wrapper.jl +# MOI wrapper to pass information to the optimizer. +################################################################################ # Sets used in general constraints const INEQ_SETS = Union{LT, GT, ET} @@ -106,7 +107,7 @@ function MOI.is_empty(m::Optimizer{R,S,T}) where {R,S,T} flag &= m._termination_status_code == MOI.OPTIMIZE_NOT_CALLED flag &= m._result_status_code == MOI.OTHER_RESULT_STATUS - # set constructor reset on empty! and to zero in initial_parse! in parse.jl + # Set constructor reset on empty! and to zero in initial_parse! in parse.jl flag &= iszero(m._run_time) flag &= iszero(m._iteration_count) flag &= iszero(m._node_count) @@ -121,7 +122,7 @@ MOI.supports_incremental_interface(m::Optimizer) = true MOI.copy_to(model::Optimizer, src::MOI.ModelLike) = MOIU.default_copy_to(model, src) ##### -##### Set & get attributes of model +##### Set and get attributes of model ##### MOI.supports(::Optimizer, ::MOI.Silent) = true diff --git a/src/eago_optimizer/optimize/nonconvex/bound.jl b/src/eago_optimizer/optimize/nonconvex/bound.jl index 57804a3c..fdcad93f 100644 --- a/src/eago_optimizer/optimize/nonconvex/bound.jl +++ b/src/eago_optimizer/optimize/nonconvex/bound.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/bound.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/optimize/nonconvex/bound.jl # Computes interval bounds of various functions. -############################################################################# +################################################################################ ### ### AFFINE FUNCTIONS diff --git a/src/eago_optimizer/optimize/nonconvex/configure_subsolver.jl b/src/eago_optimizer/optimize/nonconvex/configure_subsolver.jl index ebbc5882..ea3380d4 100644 --- a/src/eago_optimizer/optimize/nonconvex/configure_subsolver.jl +++ b/src/eago_optimizer/optimize/nonconvex/configure_subsolver.jl @@ -1,15 +1,16 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
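As an illustrative aside (not part of the patch): once `register_eago_operators!` (shown above) has been called on a JuMP model, registered operators such as `swish` and `xabsx` can be referenced by name inside the legacy `@NL*` macros, including with other solvers such as Ipopt as the docstring notes. A minimal sketch, assuming JuMP, EAGO, and Ipopt are installed:

```julia
using JuMP, EAGO, Ipopt

model = Model(Ipopt.Optimizer)
EAGO.register_eago_operators!(model)    # registers relu, swish, xabsx, logcosh, ...

@variable(model, -1.0 <= x <= 1.0)
@NLobjective(model, Min, swish(x) + xabsx(x))   # registered names used directly in the expression
optimize!(model)
```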
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/optimize/nonconvex/configure_subsolver.jl # Contains subroutines used to set default configuration for select supported # solvers along with routines needed to adjust tolerances to mirror tolerance # adjustments in the global solve. -############################################################################# +################################################################################ function set_default_config_udf!(s, m::MOI.AbstractOptimizer, verbosity::Int) if verbosity > 0 diff --git a/src/eago_optimizer/optimize/nonconvex/display.jl b/src/eago_optimizer/optimize/nonconvex/display.jl index ec4cdf37..96ace728 100644 --- a/src/eago_optimizer/optimize/nonconvex/display.jl +++ b/src/eago_optimizer/optimize/nonconvex/display.jl @@ -1,15 +1,16 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/display.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/optimize/nonconvex/display.jl # Functions used to print information about solution routine to console. # Printing is done with reference to the input problem if there is any # ambiguity. -############################################################################# +################################################################################ """ $(FUNCTIONNAME) @@ -36,16 +37,29 @@ function print_solution!(m::GlobalOptimizer) elseif m._end_state == GS_TIME_LIMIT println("Time Limit Exceeded") end - println("First Solution Found at Node $(m._first_solution_node)") #TODO: Why is this "first solution"? 
- if !_is_input_min(m) - println("LBD = $(MOI.get(m, MOI.ObjectiveBound()))") - println("UBD = $(MOI.get(m, MOI.ObjectiveValue()))") + if m._end_state == GS_OPTIMAL || m._end_state == GS_RELATIVE_TOL || m._end_state == GS_ABSOLUTE_TOL + println("Optimal Solution Found at Node $(m._solution_node)") + if !_is_input_min(m) + println("Lower Bound: $(MOI.get(m, MOI.ObjectiveBound()))") + println("Upper Bound: $(MOI.get(m, MOI.ObjectiveValue()))") + else + println("Lower Bound: $(MOI.get(m, MOI.ObjectiveBound()))") + println("Upper Bound: $(MOI.get(m, MOI.ObjectiveValue()))") + end + elseif m._end_state == GS_INFEASIBLE + println("No Solution Found") else - println("LBD = $(MOI.get(m, MOI.ObjectiveBound()))") - println("UBD = $(MOI.get(m, MOI.ObjectiveValue()))") + println("Best Solution Found at Node $(m._solution_node)") + if !_is_input_min(m) + println("Lower Bound: $(MOI.get(m, MOI.ObjectiveBound()))") + println("Upper Bound: $(MOI.get(m, MOI.ObjectiveValue()))") + else + println("Lower Bound: $(MOI.get(m, MOI.ObjectiveBound()))") + println("Upper Bound: $(MOI.get(m, MOI.ObjectiveValue()))") + end end - println("Solution is:") if m._feasible_solution_found + println("Solution:") for i = 1:m._input_problem._variable_count println(" X[$i] = $(m._continuous_solution[i])") end @@ -59,15 +73,18 @@ end $(FUNCTIONNAME) Print information about the current node. Includes node ID, lower bound, -and interval box. +upper bound, and interval box. """ function print_node!(m::GlobalOptimizer) if _verbosity(m) >= 3 n = m._current_node - bound = _is_input_min(m) ? n.lower_bound : -n.lower_bound + lower_bound = _is_input_min(m) ? n.lower_bound : -n.lower_bound + upper_bound = _is_input_min(m) ? n.upper_bound : -n.upper_bound k = length(n) - (_obj_var_slack_added(m) ? 1 : 0) println(" ") - println("Node ID: $(n.id), Lower Bound: $(bound)") + println("Node ID: $(n.id)") + println("Lower Bound: $(lower_bound)") + println("Upper Bound: $(upper_bound)") println("Lower Variable Bounds: $(n.lower_variable_bounds[1:k])") println("Upper Variable Bounds: $(n.upper_variable_bounds[1:k])") println(" ") @@ -86,68 +103,69 @@ function print_iteration!(m::GlobalOptimizer) if _verbosity(m) > 0 - # Print header line every `header_iterations` times - if mod(m._iteration_count, m._parameters.header_iterations) === 0 || m._iteration_count === 1 - println("-----------------------------------------------------------------------------------------------------------------------------") - println("| Iteration # | Nodes | Lower Bound | Upper Bound | Gap | Ratio | Time | Time Left |") - println("-----------------------------------------------------------------------------------------------------------------------------") - end - - # Print iteration summary every `output_iterations` times + # Print header line every `header_iterations` times and print iteration summary every `output_iterations` times if mod(m._iteration_count, m._parameters.output_iterations) === 0 + if m._iteration_count == m._parameters.output_iterations || mod(m._iteration_count, m._parameters.header_iterations) < m._parameters.output_iterations + println("---------------------------------------------------------------------------------------------------------------------------------") + println("| Iteration # | Nodes | Lower Bound | Upper Bound | Gap | Ratio | Timer | Time Left |") + println("---------------------------------------------------------------------------------------------------------------------------------") + end + # Print start + print_str = "| " - 
print_str = "| " - + # Print iteration number max_len = 12 temp_str = string(m._iteration_count) len_str = length(temp_str) - print_str *= (" "^(max_len - len_str))*temp_str*" | " + print_str *= (" "^(max_len - len_str))*temp_str*" | " - max_len = 12 + # Print node count + max_len = 13 temp_str = string(m._node_count) len_str = length(temp_str) print_str *= (" "^(max_len - len_str))*temp_str*" | " - max_len = 12 + # Determine lower and upper bound if _is_input_min(m) lower = m._global_lower_bound upper = m._global_upper_bound else - lower = m._global_lower_bound #TODO: Shouldn't these be negated? + lower = m._global_lower_bound upper = m._global_upper_bound end - #temp_str = string(round(lower, sigdigits = 5)) - #temp_str = string(lower, sigdigits = 3)) + + # Print lower bound + max_len = 13 temp_str = @sprintf "%.3E" lower len_str = length(temp_str) print_str *= (" "^(max_len - len_str))*temp_str*" | " - #temp_str = formatted(upper, PRINTING_IOFORMAT, ndigits=4, charset=PRINTING_CHARSET) - #temp_str = string(upper, sigdigits = 3)) + # Print upper bound + max_len = 13 temp_str = @sprintf "%.3E" upper len_str = length(temp_str) - print_str *= (" "^(max_len - len_str))*temp_str*" |" + print_str *= (" "^(max_len - len_str))*temp_str*" | " - max_len = 12 - #temp_str = string(round(abs(x._global_upper_bound - x._global_lower_bound), sigdigits = 3)) + # Print absolute gap between lower and upper bound + max_len = 13 temp_str = @sprintf "%.3E" abs(m._global_upper_bound - m._global_lower_bound) len_str = length(temp_str) print_str *= (" "^(max_len - len_str))*temp_str*" | " - max_len = 12 - #temp_str = string(round(relative_gap(x._global_lower_bound, x._global_upper_bound), sigdigits = 3)) + # Print relative gap between lower and upper bound + max_len = 13 temp_str = @sprintf "%.3E" relative_gap(m._global_lower_bound, m._global_upper_bound) len_str = length(temp_str) print_str *= (" "^(max_len - len_str))*temp_str*" | " - max_len = 12 - #temp_str = string(round(x._run_time, sigdigits = 3)) + # Print run time + max_len = 13 temp_str = @sprintf "%.3E" m._run_time len_str = length(temp_str) print_str *= (" "^(max_len - len_str))*temp_str*" | " - max_len = 12 - #temp_str = string(round(x._time_left, sigdigits = 4)) + # Print time remaining + max_len = 13 temp_str = @sprintf "%.3E" m._time_left len_str = length(temp_str) print_str *= (" "^(max_len - len_str))*temp_str*" |" @@ -172,23 +190,26 @@ function print_results!(m::GlobalOptimizer, lower_flag::Bool) println(" ") if lower_flag if _is_input_min(m) - print("Lower Bound (First Iteration): $(m._lower_objective_value),") + println("Lower Bound (First Iteration): $(m._lower_objective_value)") else - print("Upper Bound (First Iteration): $(m._lower_objective_value),") + println("Upper Bound (First Iteration): $(m._lower_objective_value)") end - print(" Solution: $(m._lower_solution[1:k]), Feasibility: $(m._lower_feasibility)\n") + println("Solution: $(m._lower_solution[1:k])") + println("Feasibility: $(m._lower_feasibility)") println("Termination Status Code: $(m._lower_termination_status)") println("Result Code: $(m._lower_primal_status)") else if _is_input_min(m) - print("Upper Bound: $(m._upper_objective_value), ") + println("Upper Bound: $(m._upper_objective_value)") else - print("Lower Bound: $(m._upper_objective_value), ") + println("Lower Bound: $(m._upper_objective_value)") end - print(" Solution: $(m._upper_solution[1:k]), Feasibility: $(m._upper_feasibility)\n") + println("Solution: $(m._upper_solution[1:k])") + println("Feasibility: 
$(m._upper_feasibility)") println("Termination Status Code: $(m._upper_termination_status)") println("Result Code: $(m._upper_result_status)") end + println(" ") end return end diff --git a/src/eago_optimizer/optimize/nonconvex/log_iteration.jl b/src/eago_optimizer/optimize/nonconvex/log_iteration.jl index 6b19add3..715dfc42 100644 --- a/src/eago_optimizer/optimize/nonconvex/log_iteration.jl +++ b/src/eago_optimizer/optimize/nonconvex/log_iteration.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/logging/log_iteration.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/optimize/nonconvex/log_iteration.jl # Defines all routine to store information at each iteration. -############################################################################# +################################################################################ """ $(TYPEDSIGNATURES) diff --git a/src/eago_optimizer/optimize/nonconvex/lower_problem.jl b/src/eago_optimizer/optimize/nonconvex/lower_problem.jl index 11194b1f..094986aa 100644 --- a/src/eago_optimizer/optimize/nonconvex/lower_problem.jl +++ b/src/eago_optimizer/optimize/nonconvex/lower_problem.jl @@ -1,15 +1,16 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/optimize/lower_problem.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/optimize/nonconvex/lower_problem.jl # Functions which define how relaxations of subproblems are constructed, # when domain reductions algorithms are run, and the lower bounding (relaxed # problem solution subroutines). 
-############################################################################# +################################################################################ """ $(FUNCTIONNAME) @@ -124,11 +125,11 @@ function reset_relaxation!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionTyp m._new_eval_objective = true m._new_eval_constraint = true - # delete added affine constraints + # Delete added affine constraints foreach(c -> MOI.delete(d, c), m._affine_relax_ci) empty!(m._affine_relax_ci) - # delete variable + # Delete variable foreach(c -> MOI.delete(d, c[1]), m._relaxed_variable_et) foreach(c -> MOI.delete(d, c[1]), m._relaxed_variable_lt) foreach(c -> MOI.delete(d, c[1]), m._relaxed_variable_gt) @@ -138,7 +139,7 @@ function reset_relaxation!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionTyp empty!(m._relaxed_variable_gt) empty!(m._relaxed_variable_integer) - # delete objective cut + # Delete objective cut !isnothing(m._affine_objective_cut_ci) && MOI.delete(d, m._affine_objective_cut_ci) return end @@ -306,9 +307,9 @@ function preprocess!(t::ExtensionType, m::GlobalOptimizer{R,S,Q}) where {R,S,Q<: unpack_fbbt_buffer!(m) end - # done after cp to prevent using cp specific flags in cut generation + # Done after cp to prevent using cp specific flags in cut generation set_first_relax_point!(m) - # nonlinear CP can detect infeasibility and bound objective even if + # Nonlinear cp can detect infeasibility and bound objective even if # the relaxation is ill-posed, so one is always used to mitigate numerical issues cp_reps = _cp_depth(m) >= _iteration_count(m) ? _cp_repetitions(m) : 1 for _ = 1:_cp_repetitions(m) @@ -418,7 +419,7 @@ function lower_problem!(t::ExtensionType, m::GlobalOptimizer{R,S,Q}) where {R,S, status = RRS_INFEASIBLE end - # activate integrality conditions for MIP & solve MIP subproblem + # Activate integrality conditions for MIP and solve MIP subproblem if is_integer_subproblem(m) && (status !== RRS_INFEASIBLE) m._last_cut_objective = m._lower_objective_value for i = 1:_variable_num(BranchVar(), m) @@ -439,7 +440,7 @@ function lower_problem!(t::ExtensionType, m::GlobalOptimizer{R,S,Q}) where {R,S, end end - # check status -- if not feasible/infeasible then fallback to interval bounds + # Check status, if not feasible/infeasible then fallback to interval bounds if status == RRS_OPTIMAL m._lower_objective_value = MOI.get(d, MOI.ObjectiveValue()) end @@ -457,7 +458,7 @@ function lower_problem!(t::ExtensionType, m::GlobalOptimizer{R,S,Q}) where {R,S, return end - # set dual values + # Set dual values set_dual!(m) m._lower_feasibility = true store_lower_solution!(m, d) diff --git a/src/eago_optimizer/optimize/nonconvex/postprocess.jl b/src/eago_optimizer/optimize/nonconvex/postprocess.jl index b5383ed4..37d7c61d 100644 --- a/src/eago_optimizer/optimize/nonconvex/postprocess.jl +++ b/src/eago_optimizer/optimize/nonconvex/postprocess.jl @@ -1,14 +1,15 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). 
+################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_optimizer/optimize/nonconvex/postprocess.jl # Routine which calls duality-based bound tightening after solving the upper # bounding problem. -############################################################################# +################################################################################ """ $(TYPEDSIGNATURES) diff --git a/src/eago_optimizer/optimize/nonconvex/relax.jl b/src/eago_optimizer/optimize/nonconvex/relax.jl index 0f5e8fdd..96d65469 100644 --- a/src/eago_optimizer/optimize/nonconvex/relax.jl +++ b/src/eago_optimizer/optimize/nonconvex/relax.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/relax.jl -# Defines routines used construct the relaxed subproblem. -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/optimize/nonconvex/relax.jl +# Defines routines used to construct the relaxed subproblem. 
+################################################################################ """ $(TYPEDSIGNATURES) @@ -27,15 +28,15 @@ function is_safe_cut!(m::GlobalOptimizer, f::SAF) safe_u = m._parameters.cut_safe_u safe_b = m._parameters.cut_safe_b - (abs(f.constant) > safe_b) && return false # violates |b| <= safe_b + (abs(f.constant) > safe_b) && return false # Violates |b| <= safe_b term_count = length(f.terms) @inbounds for i = 1:term_count ai = f.terms[i].coefficient if ai !== 0.0 - if !(safe_l <= abs(ai) <= safe_u) # violates safe_l <= abs(ai) <= safe_u + if !(safe_l <= abs(ai) <= safe_u) # Violates safe_l <= abs(ai) <= safe_u return false end - @inbounds for j = i:term_count # violates safe_l <= abs(ai/aj) <= safe_u + @inbounds for j = i:term_count # Violates safe_l <= abs(ai/aj) <= safe_u aj = f.terms[j].coefficient if aj !== 0.0 if !(safe_l <= abs(ai/aj) <= safe_u) @@ -203,7 +204,7 @@ function relax!(m::GlobalOptimizer{R,S,Q}, f::BufferedNonlinearFunction{V,N,T}, else v = set(f) if !isempty(v) - # if has less than or equal to bound (<=) + # If has less than or equal to bound (<=) if isfinite(upper_bound(f)) lower_cut_valid = !isnan(v.cv) && isfinite(v.cv) if lower_cut_valid @@ -216,7 +217,7 @@ function relax!(m::GlobalOptimizer{R,S,Q}, f::BufferedNonlinearFunction{V,N,T}, valid_cut_flag = check_set_affine_nl!(m, f, lower_cut_valid, check_safe) end end - # if has greater than or equal to bound (>=) + # If has greater than or equal to bound (>=) if isfinite(lower_bound(f)) upper_cut_valid = !isnan(v.cc) && isfinite(v.cc) if upper_cut_valid diff --git a/src/eago_optimizer/optimize/nonconvex/stack_management.jl b/src/eago_optimizer/optimize/nonconvex/stack_management.jl index 1c97daab..a321404c 100644 --- a/src/eago_optimizer/optimize/nonconvex/stack_management.jl +++ b/src/eago_optimizer/optimize/nonconvex/stack_management.jl @@ -1,14 +1,15 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/optimize/nonconvex_branch/stack_management.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/optimize/nonconvex/stack_management.jl # Contains the subroutines used for stack management. Namely, initialize_stack! # select_branch_variable!, select_branch_point!, branch_node!, and fathom!. 
-############################################################################# +################################################################################ #= function _variable_infeasibility(m::GlobalOptimizer, i::Int) tsum = zero(Float64); tmin = typemax(Float64); tmax = typemin(Float64) @@ -235,7 +236,7 @@ $(TYPEDSIGNATURES) Check the optimization problem for unbounded branching variables, which would interfere with EAGO's branch-and-bound routine since there are no well-defined branching rules for cases where the interval bounds contain `-Inf` or `Inf`. If any branching variables -are missing bounds, add the missing bound at +/- 1e10 and warn the user. +are missing bounds, add the missing bound at +/- 1E10 and warn the user. """ function unbounded_check!(m::GlobalOptimizer) if m._parameters.unbounded_check @@ -245,17 +246,17 @@ function unbounded_check!(m::GlobalOptimizer) for i = 1:_variable_num(BranchVar(), m) - epigraph_flag #Not including epigraph reformulation variable if !wp._variable_info[i].has_lower_bound unbounded_flag = true - wp._variable_info[i] = VariableInfo(wp._variable_info[i], GT(-1e10)) + wp._variable_info[i] = VariableInfo(wp._variable_info[i], GT(-1E10)) end if !wp._variable_info[i].has_upper_bound unbounded_flag = true - wp._variable_info[i] = VariableInfo(wp._variable_info[i], LT(1e10)) + wp._variable_info[i] = VariableInfo(wp._variable_info[i], LT(1E10)) end end unbounded_flag && @warn(""" At least one branching variable is unbounded. This will interfere with EAGO's global optimization routine and may cause unexpected results. Bounds have been automatically - generated at +/- 1e10 for all unbounded variables, but tighter user-defined bounds are + generated at +/- 1E10 for all unbounded variables, but tighter user-defined bounds are highly recommended. To disable this warning and the automatic generation of bounds, use the option `unbounded_check = false`.""") end diff --git a/src/eago_optimizer/optimize/nonconvex/upper_problem.jl b/src/eago_optimizer/optimize/nonconvex/upper_problem.jl index 27dede73..609a0d81 100644 --- a/src/eago_optimizer/optimize/nonconvex/upper_problem.jl +++ b/src/eago_optimizer/optimize/nonconvex/upper_problem.jl @@ -1,14 +1,15 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/optimize/nonconvex.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/optimize/nonconvex/upper_problem.jl # Functions which determine when the upper bounding (local NLP) problem should # be solved as well as routines used to call the upper bounding problem. 
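As an illustrative aside (not part of the patch): the `unbounded_check = false` option mentioned in the warning above is an EAGO parameter, so it can be supplied when the optimizer is constructed. A minimal sketch, assuming JuMP and EAGO are installed and that the parameter is passed as a raw optimizer attribute (as with EAGO's other parameters):

```julia
using JuMP, EAGO

# Opt out of the automatic +/- 1E10 bounds and the associated warning;
# explicit variable bounds are supplied instead.
model = Model(optimizer_with_attributes(EAGO.Optimizer, "unbounded_check" => false))

@variable(model, -5.0 <= x <= 5.0)
@NLobjective(model, Min, (x - 1.0)^2 + sin(3.0 * x))
optimize!(model)
```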
-############################################################################# +################################################################################ """ $(TYPEDSIGNATURES) diff --git a/src/eago_optimizer/optimize/optimize.jl b/src/eago_optimizer/optimize/optimize.jl index 23ef5bc8..7aabc271 100644 --- a/src/eago_optimizer/optimize/optimize.jl +++ b/src/eago_optimizer/optimize/optimize.jl @@ -1,14 +1,15 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_optimizer/optimize/optimize.jl -# Load subproblem type optimization routines and define general optimize! -# throw_optimize_hook!. -############################################################################# +# Load subproblem type optimization routines and define general optimize! and +# throw_optimize_hook! functions. +################################################################################ include("optimize_lp.jl") include("optimize_conic.jl") @@ -39,7 +40,7 @@ function MOI.optimize!(m::Optimizer{Q,S,T}) where {Q,S,T} optimize!(m._working_problem._problem_type, m) else - # throws to user-defined optimization hook + # Throws to user-defined optimization hook throw_optimize_hook!(m) end diff --git a/src/eago_optimizer/optimize/optimize_conic.jl b/src/eago_optimizer/optimize/optimize_conic.jl index bb8de712..a05ce1e4 100644 --- a/src/eago_optimizer/optimize/optimize_conic.jl +++ b/src/eago_optimizer/optimize/optimize_conic.jl @@ -1,15 +1,16 @@ -# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_optimizer/optimize/optimize_conic.jl # Contains the optimize! routine for SOCP (and in the future MISOCP) type # problems. This also includes functions to add variables, linear # constraints, soc constraints, and unpack solutions. 
-############################################################################# +################################################################################ ### LP and MILP routines function add_soc_constraints!(m::GlobalOptimizer, opt::T) where T diff --git a/src/eago_optimizer/optimize/optimize_convex.jl b/src/eago_optimizer/optimize/optimize_convex.jl index e36b0b84..d7e271a9 100644 --- a/src/eago_optimizer/optimize/optimize_convex.jl +++ b/src/eago_optimizer/optimize/optimize_convex.jl @@ -1,16 +1,17 @@ -# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_optimizer/optimize/optimize_convex.jl # Contains the solve_local_nlp! routine which computes the optimal value # of a convex function. This is used to compute the upper bound in the # branch and bound routine. A number of utility functions required for # solve_local_nlp! are also included. -############################################################################# +################################################################################ """ $(TYPEDSIGNATURES) @@ -116,10 +117,10 @@ function local_problem_status(t::MOI.TerminationStatusCode, r::MOI.ResultStatusC return LRS_FEASIBLE elseif (t == MOI.LOCALLY_SOLVED) && (r == MOI.FEASIBLE_POINT) return LRS_FEASIBLE - # This is default solver specific... the acceptable constraint tolerances + # This is default solver specific. The acceptable constraint tolerances # are set to the same values as the basic tolerance. As a result, an # acceptably solved solution is feasible but non necessarily optimal - # so it should be treated as a feasible point + # so it should be treated as a feasible point. elseif (t == MOI.ALMOST_LOCALLY_SOLVED) && (r == MOI.NEARLY_FEASIBLE_POINT) return LRS_FEASIBLE end @@ -169,7 +170,7 @@ function solve_local_nlp!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType _update_upper_variables!(upper_optimizer, m) _set_starting_point!(upper_optimizer, m) - # add constraints + # Add constraints ip = m._input_problem _add_constraint_store_ci_linear!(upper_optimizer, ip) _add_constraint_store_ci_quadratic!(upper_optimizer, ip) diff --git a/src/eago_optimizer/optimize/optimize_lp.jl b/src/eago_optimizer/optimize/optimize_lp.jl index 2895fc37..de966fd0 100644 --- a/src/eago_optimizer/optimize/optimize_lp.jl +++ b/src/eago_optimizer/optimize/optimize_lp.jl @@ -1,15 +1,16 @@ -# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, & Matthew Stuber. 
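The `local_problem_status` hunk above is the part of `optimize_convex.jl` that decides whether a local NLP solve counts as feasible. A simplified paraphrase is sketched below; it uses MOI's status codes but an illustrative enum and function name of its own, and omits cases the real implementation handles.

```julia
# Simplified paraphrase of the status mapping discussed above (not the full
# implementation).
using MathOptInterface
const MOI = MathOptInterface

@enum ToyLocalStatus LRS_FEASIBLE LRS_OTHER

function local_status_sketch(t::MOI.TerminationStatusCode, r::MOI.ResultStatusCode)
    if t == MOI.OPTIMAL && r == MOI.FEASIBLE_POINT
        return LRS_FEASIBLE
    elseif t == MOI.LOCALLY_SOLVED && r == MOI.FEASIBLE_POINT
        return LRS_FEASIBLE
    elseif t == MOI.ALMOST_LOCALLY_SOLVED && r == MOI.NEARLY_FEASIBLE_POINT
        # Acceptable tolerances match the basic tolerances, so an "acceptably
        # solved" point is treated as feasible, though not necessarily optimal.
        return LRS_FEASIBLE
    end
    return LRS_OTHER
end
```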
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_optimizer/optimize/optimize_lp.jl # Contains the optimize! routines for LP and MILP type problems. This also # includes functions to add variables, linear constraints, soc constraints, # and unpack solutions. -############################################################################# +################################################################################ function add_variables(m::GlobalOptimizer, d) n = m._input_problem._variable_count diff --git a/src/eago_optimizer/optimize/optimize_nonconvex.jl b/src/eago_optimizer/optimize/optimize_nonconvex.jl index 012068da..56abc6b5 100644 --- a/src/eago_optimizer/optimize/optimize_nonconvex.jl +++ b/src/eago_optimizer/optimize/optimize_nonconvex.jl @@ -1,14 +1,15 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_optimizer/optimize/optimize_nonconvex.jl # Contains the optimize! routine and subroutines needed in the branch and # bound routine called by EAGO. -############################################################################# +################################################################################ include(joinpath(@__DIR__,"nonconvex","stack_management.jl")) include(joinpath(@__DIR__,"nonconvex","lower_problem.jl")) @@ -34,7 +35,7 @@ and quadratic cut into the relaxed optimizer. 
function load_relaxed_problem!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} d = _relaxed_optimizer(m) - # add variables and indices and constraints + # Add variables, variable indices, and constraints wp = m._working_problem branch_variable_count = 0 @@ -73,10 +74,10 @@ function load_relaxed_problem!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:Extensio MOI.add_constraint(d, issue_var, ET(0.0)) - # set number of variables to branch on + # Set number of variables to branch on m._branch_variable_count = branch_variable_count - # add linear constraints + # Add linear constraints for (f, s) in collect(values(m._input_problem._linear_leq_constraints)) MOI.add_constraint(d, f, s) end @@ -87,7 +88,7 @@ function load_relaxed_problem!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:Extensio MOI.add_constraint(d, f, s) end - # sets relaxed problem objective sense to Min as all problems + # Sets relaxed problem objective sense to Min as all problems # are internally converted in Min problems in EAGO MOI.set(d, MOI.ObjectiveSense(), MOI.MIN_SENSE) MOI.set(d, MOI.ObjectiveFunction{SAF}(), wp._objective_saf) @@ -123,8 +124,8 @@ function presolve_global!(t::ExtensionType, m::GlobalOptimizer) m._lower_uvd = fill(0.0, branch_variable_count) # Populate in full space until local MOI NLP solves support constraint deletion. - # Uses input model for local NLP solves... may adjust this if there's ever a - # convincing reason to use a reformulated upper problem + # Uses input model for local NLP solves. May adjust this if there's ever a + # convincing reason to use a reformulated upper problem. m._lower_solution = zeros(Float64, wp._variable_count) m._continuous_solution = zeros(Float64, wp._variable_count) m._upper_solution = zeros(Float64, wp._variable_count) @@ -171,9 +172,9 @@ function termination_check(t::ExtensionType, m::GlobalOptimizer) nlen = length(m._stack) L = m._global_lower_bound U = m._global_upper_bound - if nlen == 0 && m._first_solution_node > 0 + if nlen == 0 && m._solution_node > 0 m._end_state = GS_OPTIMAL - elseif nlen == 0 && !(m._first_solution_node > 0) + elseif nlen == 0 && !(m._solution_node > 0) m._end_state = GS_INFEASIBLE elseif nlen >= m._parameters.node_limit m._end_state = GS_NODE_LIMIT @@ -214,7 +215,7 @@ end const GLOBALEND_PSTATUS = Dict{GlobalEndState, MOI.ResultStatusCode}( GS_OPTIMAL => MOI.FEASIBLE_POINT, - GS_INFEASIBLE => MOI.NO_SOLUTION, # Proof of infeasibility implies not solution found + GS_INFEASIBLE => MOI.NO_SOLUTION, # Proof of infeasibility implies no solution found GS_NODE_LIMIT => MOI.UNKNOWN_RESULT_STATUS, GS_ITERATION_LIMIT => MOI.UNKNOWN_RESULT_STATUS, GS_RELATIVE_TOL => MOI.FEASIBLE_POINT, @@ -274,7 +275,7 @@ user writes `optimize!(model)`. Here, `optimize_hook!` is used to bypass EAGO's problem parsing and treat every problem using its branch-and-bound routine. This is done in this example by telling EAGO to treat the problem as a mixed integer -non-convex problem, which normally dispatches to branch-and-bound. +nonconvex problem, which normally dispatches to branch-and-bound. ```julia-repl struct MyNewExtension <: EAGO.ExtensionType end import EAGO: optimize_hook! @@ -295,14 +296,14 @@ $(TYPEDSIGNATURES) If the most recent upper problem returned a feasible result, and the upper objective value is less than the previous best-known global upper bound, set the most recent upper problem result to be the new global upper bound. 
-Update the `_feasible_solution_found`, `_first_solution_node`, +Update the `_feasible_solution_found`, `_solution_node`, `_global_upper_bound`, and `_continuous_solution` fields of the `GlobalOptimizer` accordingly. """ function store_candidate_solution!(m::GlobalOptimizer) if m._upper_feasibility && (m._upper_objective_value < m._global_upper_bound) m._feasible_solution_found = true - m._first_solution_node = m._maximum_node_id + m._solution_node = m._maximum_node_id m._global_upper_bound = m._upper_objective_value @__dot__ m._continuous_solution = m._upper_solution end @@ -471,8 +472,8 @@ function unpack_global_solution!(m::Optimizer{R,S,Q}) where {R,S,Q<:ExtensionTyp m._run_time = g._run_time m._node_count = g._maximum_node_id - # evaluate objective (so there isn't a small difference in f(x) and objective_value) - # local solvers that solve to feasibility may result in a slightly lower than true solve... + # Evaluate objective (so there isn't a small difference in f(x) and objective_value) + # local solvers that solve to feasibility may result in a slightly lower than true solve. # TODO # Store objective value and objective bound diff --git a/src/eago_optimizer/optimizer.jl b/src/eago_optimizer/optimizer.jl index a0bb97a0..fb3afcab 100644 --- a/src/eago_optimizer/optimizer.jl +++ b/src/eago_optimizer/optimizer.jl @@ -1,14 +1,15 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +## Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_optimizer/optimizer.jl # Defines optimizer structure used by EAGO. Namely, ObjectiveType, ProblemType # EAGOParameters, InputProblem, ParsedProblem, and Optimizer. -############################################################################# +################################################################################ export Optimizer """ diff --git a/src/eago_optimizer/parse.jl b/src/eago_optimizer/parse.jl index 1f1b2634..4e27a3f2 100644 --- a/src/eago_optimizer/parse.jl +++ b/src/eago_optimizer/parse.jl @@ -1,14 +1,16 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). 
+################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/parse.jl # Defines functions used to parse the input optimization problem into # a solvable form including routines used to classify input problems as a # LP, SOCP, MILP, MISOCP, and convex problem types. -############################################################################# +################################################################################ """ add_objective! @@ -93,7 +95,7 @@ function add_nonlinear!(m::GlobalOptimizer, evaluator::MOI.AbstractNLPEvaluator) user_operator_registry = OperatorRegistry(evaluator.model.operators) - # set nlp data structure + # Set nlp data structure m._working_problem._nlp_data = nlp_data mul_relax = m._parameters.mul_relax_style if mul_relax == 1 @@ -114,20 +116,20 @@ function add_nonlinear!(m::GlobalOptimizer, evaluator::MOI.AbstractNLPEvaluator) ruse_apriori = false end - # add subexpressions (assumes they are already ordered by JuMP) - # creates a dictionary that lists the subexpression sparsity - # by search each node for variables dict[2] = [2,3] indicates - # that subexpression 2 depends on variables 2 and 3 - # this is referenced when subexpressions are called by other - # subexpressions or functions to determine overall sparsity - # the sparsity of a function is the collection of indices + # Add subexpressions (assumes they are already ordered by JuMP) + # Creates a dictionary that lists the subexpression sparsity + # by searching each node for variables dict[2] = [2,3]. Indicates + # that subexpression 2 depends on variables 2 and 3. + # This is referenced when subexpressions are called by other + # subexpressions or functions to determine overall sparsity. + # The sparsity of a function is the collection of indices # in all participating subexpressions and the function itself - # it is necessary to define this as such to enable reverse - # McCormick constraint propagation + # is necessary to define this as such to enable reverse + # McCormick constraint propagation. relax_evaluator = m._working_problem._relaxed_evaluator relax_evaluator.relax_type = renum dict_sparsity = Dict{Int,Vector{Int}}() - if length(evaluator.model.expressions) > 0 # should check for nonlinear objective, constraint + if length(evaluator.model.expressions) > 0 # Should check for nonlinear objective/constraint for i = 1:length(evaluator.backend.subexpressions) subexpr = evaluator.backend.subexpressions[i] nlexpr = NonlinearExpression!(m._auxiliary_variable_info, rtype, subexpr, MOI.NLPBoundsPair(-Inf, Inf), @@ -138,14 +140,14 @@ function add_nonlinear!(m::GlobalOptimizer, evaluator::MOI.AbstractNLPEvaluator) end end - # scrubs udf functions using Cassette to remove odd data structures... - # alternatively convert udfs to JuMP scripts... 
+ # Scrubs udf functions using Cassette to remove odd data structures + # Alternatively, convert udfs to JuMP scripts m._parameters.presolve_scrubber_flag && Script.scrub!(m._working_problem._nlp_data) if m._parameters.presolve_to_JuMP_flag Script.udf_loader!(m) end - # add nonlinear objective + # Add nonlinear objective if evaluator.model.objective !== nothing m._working_problem._objective = BufferedNonlinearFunction(m._auxiliary_variable_info, rtype, evaluator.backend.objective, MOI.NLPBoundsPair(-Inf, Inf), dict_sparsity, evaluator.backend.subexpression_linearity, @@ -154,7 +156,7 @@ function add_nonlinear!(m::GlobalOptimizer, evaluator::MOI.AbstractNLPEvaluator) m._parameters.relax_tag, ruse_apriori) end - # add nonlinear constraints + # Add nonlinear constraints constraint_bounds = m._working_problem._nlp_data.constraint_bounds for i = 1:length(evaluator.model.constraints) constraint = evaluator.backend.constraints[i] @@ -222,10 +224,10 @@ function reform_epigraph_min!(m::GlobalOptimizer, d::ParsedProblem, f::BufferedQ ηi = add_η!(d) if !isnothing(ip._nlp_data) - # add variable + # Add variable η = MOI.VariableIndex(ip._variable_count + 1) push!(ip._nlp_data.evaluator.backend.ordered_variables, η) - # add objective + # Add objective MOINL.set_objective(ip._nlp_data.evaluator.model, :($η)) end @@ -267,7 +269,7 @@ function reform_epigraph_min!(m::GlobalOptimizer, d::ParsedProblem, f::BufferedN MOINL.set_objective(ip._nlp_data.evaluator.model, :($η)) wp._objective_saf = SAF([SAT(1.0, VI(ηi))], 0.0) - # check if input problem is min or max + # Check if input problem is min or max if !_is_input_min(m) cons = MOINL.add_expression(ip._nlp_data.evaluator.model, :(-$expr - $η)) MOINL.add_constraint(ip._nlp_data.evaluator.model, :($cons), MOI.LessThan(0.0)) @@ -326,8 +328,7 @@ function label_branch_variables!(m::GlobalOptimizer) m._user_branch_variables = !isempty(m._parameters.branch_variable) if m._user_branch_variables m._branch_variables = m._parameters.branch_variable - if length(wp._variable_info) > length(m._branch_variables) #Should only need 1 - push!(m._parameters.branch_variable, true) + if length(wp._variable_info) > length(m._branch_variables) # Should only need 1 push!(m._branch_variables, true) end else @@ -355,7 +356,7 @@ function label_branch_variables!(m::GlobalOptimizer) end end - # add a map of branch/node index to variables in the continuous solution + # Add a map of branch/node index to variables in the continuous solution for i = 1:wp._variable_count if is_fixed(wp._variable_info[i]) m._branch_variables[i] = false @@ -369,14 +370,14 @@ function label_branch_variables!(m::GlobalOptimizer) end end - # creates reverse map + # Creates reverse map m._sol_to_branch_map = zeros(wp._variable_count) for i = 1:length(m._branch_to_sol_map) j = m._branch_to_sol_map[i] m._sol_to_branch_map[j] = i end - # adds branch solution to branch map to evaluator + # Adds branch solution to branch map to evaluator vnum = wp._variable_count initialize!(m._branch_cost, length(m._branch_to_sol_map)) l = lower_bound.(m._working_problem._variable_info) diff --git a/src/eago_optimizer/types/global_optimizer.jl b/src/eago_optimizer/types/global_optimizer.jl index 41ec0eff..a1c901a1 100644 --- a/src/eago_optimizer/types/global_optimizer.jl +++ b/src/eago_optimizer/types/global_optimizer.jl @@ -725,8 +725,8 @@ Base.@kwdef mutable struct GlobalOptimizer{Q,S,T<:ExtensionType} <: MOI.Abstract "A flag for if a feasible solution was identified. 
Updated if preprocessing, lower problem, and upper problem all return feasible values" _feasible_solution_found::Bool = false - "The node ID of the best-known feasible upper problem solution" - _first_solution_node::Int = -1 #TODO: Why is this called "First" solution node? Isn't it just the location of the current best solution? + "The node ID of the best-known feasible upper problem solution (default = -1, if no feasible solution is found)" + _solution_node::Int = -1 "The best-known upper bound" _best_upper_value::Float64 = Inf #TODO: Duplicate of _global_upper_bound? Why is this ever used instead? diff --git a/src/eago_optimizer/types/incremental.jl b/src/eago_optimizer/types/incremental.jl index df292ebe..93994504 100644 --- a/src/eago_optimizer/types/incremental.jl +++ b/src/eago_optimizer/types/incremental.jl @@ -1,16 +1,17 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_optimizer/types/incremental.jl # A type-stable wrapper for optimizers used by EAGO to enable bridging and # incremental loading. This is tailored to the internal routines used by EAGO.jl # so methods may be specialized by optimizer types and error checking is often # avoided. 
-############################################################################# +################################################################################ #= mutable struct IncrementalCache{S <: MOI.AbstractOptimizer} <: MOI.AbstractOptimizer} @@ -116,49 +117,57 @@ function MOI.set(d::Incremental, ::MOI.ConstraintSet, ci::CI{VI,T}, s::T) where return end -# Get attributes -MOI.get(d::Incremental{S}, ::MOI.TerminationStatus) where S = MOI.get(d.optimizer, MOI.TerminationStatus())::MOI.TerminationStatusCode -MOI.get(d::Incremental{S}, ::MOI.PrimalStatus) where S = MOI.get(d.optimizer, MOI.PrimalStatus())::MOI.ResultStatusCode +# Get optimizer attributes MOI.get(d::Incremental{S}, ::MOI.DualStatus) where S = MOI.get(d.optimizer, MOI.DualStatus())::MOI.ResultStatusCode +MOI.get(d::Incremental{S}, ::MOI.PrimalStatus) where S = MOI.get(d.optimizer, MOI.PrimalStatus())::MOI.ResultStatusCode MOI.get(d::Incremental{S}, ::MOI.RawStatusString) where S = MOI.get(d.optimizer, MOI.RawStatusString())::MOI.String +MOI.get(d::Incremental{S}, ::MOI.ResultCount) where S = MOI.get(d.optimizer, MOI.ResultCount())::Int +MOI.get(d::Incremental{S}, ::MOI.TerminationStatus) where S = MOI.get(d.optimizer, MOI.TerminationStatus())::MOI.TerminationStatusCode -for T in (MOI.ObjectiveBound, MOI.ObjectiveValue, MOI.DualObjectiveValue) - @eval MOI.get(d::Incremental{S}, ::$T) where S = MOI.get(d.optimizer, ($T)())::Float64 -end -MOI.get(d::Incremental{S}, ::MOI.ObjectiveSense) where S = MOI.get(d.optimizer, MOI.ObjectiveSense()) -MOI.get(d::Incremental{S}, ::MOI.ObjectiveFunctionType) where S = MOI.get(_get_storage(d), MOI.ObjectiveFunctionType()) +MOI.get(d::Incremental{S}, ::MOI.DualObjectiveValue) where S = MOI.get(d.optimizer, MOI.DualObjectiveValue())::Float64 +MOI.get(d::Incremental{S}, ::MOI.ObjectiveBound) where S = MOI.get(d.optimizer, MOI.ObjectiveBound())::Float64 +MOI.get(d::Incremental{S}, ::MOI.ObjectiveValue) where S = MOI.get(d.optimizer, MOI.ObjectiveValue())::Float64 + +MOI.get(d::Incremental{S}, ::MOI.Silent) where S = MOI.get(d.optimizer, MOI.Silent())::Bool +MOI.get(d::Incremental{S}, n::MOI.SolverName) where S = MOI.get(d.optimizer, n)::String +MOI.get(d::Incremental{S}, ::MOI.SolverVersion) where S = MOI.get(d.optimizer, MOI.SolverVersion())::String +MOI.get(d::Incremental{S}, ::MOI.SolveTimeSec) where S = MOI.get(d.optimizer, MOI.SolveTimeSec())::Float64 +MOI.get(d::Incremental{S}, ::MOI.TimeLimitSec) where S = MOI.get(d.optimizer, MOI.TimeLimitSec()) + +# Get model attributes +MOI.get(d::Incremental{S}, x::MOI.ListOfConstraintAttributesSet{T}) where {S,T} = MOI.get(_get_storage(d), x) +MOI.get(d::Incremental{S}, x::MOI.ListOfConstraintIndices{T}) where {S,T} = MOI.get(_get_storage(d), x) +MOI.get(d::Incremental{S}, n::MOI.ListOfConstraintTypesPresent) where S = MOI.get(_get_storage(d), n) +MOI.get(d::Incremental{S}, n::MOI.ListOfModelAttributesSet) where S = MOI.get(_get_storage(d), n) +MOI.get(d::Incremental{S}, n::MOI.ListOfVariableAttributesSet) where S = MOI.get(_get_storage(d), n) +MOI.get(d::Incremental{S}, n::MOI.ListOfVariableIndices) where S = MOI.get(_get_storage(d), n) +MOI.get(d::Incremental{S}, n::MOI.NumberOfConstraints{T}) where {S,T} = MOI.get(_get_storage(d), n) +MOI.get(d::Incremental{S}, n::MOI.NumberOfVariables) where S = MOI.get(_get_storage(d), n) +MOI.get(d::Incremental{S}, n::MOI.Name) where S = MOI.get(_get_storage(d), n) MOI.get(d::Incremental{S}, ::MOI.ObjectiveFunction{T}) where {S,T} = MOI.get(_get_storage(d), MOI.ObjectiveFunction{T}()) 
+MOI.get(d::Incremental{S}, ::MOI.ObjectiveFunctionType) where S = MOI.get(_get_storage(d), MOI.ObjectiveFunctionType()) +MOI.get(d::Incremental{S}, ::MOI.ObjectiveSense) where S = MOI.get(d.optimizer, MOI.ObjectiveSense()) +# Get variable attributes MOI.get(d::Incremental{S}, ::MOI.VariableName, vi::VI) where S = MOI.get(_get_storage(d), MOI.VariableName(), vi) -MOI.get(d::Incremental{S}, x::MOI.ListOfConstraintIndices{T}) where {S,T} = MOI.get(_get_storage(d), x) +MOI.get(d::Incremental{S}, ::MOI.VariablePrimal, vi::VI) where S = MOI.get(d.optimizer, MOI.VariablePrimal(), vi)::Float64 +# Get constraint attributes MOI.get(d::Incremental{S}, ::MOI.ConstraintFunction, ci::MOI.ConstraintIndex{T}) where {S,T} = MOI.get(_get_storage(d), MOI.ConstraintFunction(), ci) MOI.get(d::Incremental{S}, ::MOI.ConstraintSet, ci::MOI.ConstraintIndex{T}) where {S,T} = MOI.get(_get_storage(d), MOI.ConstraintSet(), ci) - -MOI.get(d::Incremental{S}, ::MOI.VariablePrimal, vi::VI) where S = MOI.get(d.optimizer, MOI.VariablePrimal(), vi)::Float64 - const SAF_CI_TYPES = Union{CI{SAF,LT},CI{SAF,GT},CI{SAF,ET}} function MOI.get(d::Incremental{S}, ::MOI.ConstraintPrimal, ci::SAF_CI_TYPES) where S MOI.get(d.optimizer, MOI.ConstraintPrimal(), ci)::Float64 end - const SQF_CI_TYPES = Union{CI{SQF,LT},CI{SQF,GT},CI{SQF,ET}} function MOI.get(d::Incremental{S}, ::MOI.ConstraintPrimal, ci::SQF_CI_TYPES) where S MOI.get(d.optimizer, MOI.ConstraintPrimal(), ci)::Float64 end - function MOI.get(d::Incremental{S}, ::MOI.ConstraintDual, ci::Union{CI{VI,LT},CI{VI,GT}}) where S MOI.get(d.optimizer, MOI.ConstraintDual(), ci)::Float64 end -function MOI.get(d::Incremental{S}, ::MOI.ResultCount) where S - MOI.get(d.optimizer, MOI.ResultCount())::Int -end -function MOI.get(d::Incremental{S}, n::MOI.SolverName) where S - MOI.get(d.optimizer, n)::String -end -MOI.get(d::Incremental{S}, n::MOI.ListOfConstraintTypesPresent) where S = MOI.get(_get_storage(d), n) -# define optimize! +# Define optimize! MOI.optimize!(d::Incremental{S}) where S = MOI.optimize!(S, d) MOI.optimize!(x, d::Incremental{S}) where S = MOI.optimize!(_get_storage(d)) diff --git a/src/eago_optimizer/types/log.jl b/src/eago_optimizer/types/log.jl index e155600d..09e8ec3b 100644 --- a/src/eago_optimizer/types/log.jl +++ b/src/eago_optimizer/types/log.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/logging/log.jl -# Defines all type used to store solution information at given iterations -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/types/log.jl +# Defines all type used to store solution information at given iterations. 
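The reorganized `MOI.get` methods above all follow one pattern: a thin, type-stable wrapper forwards attribute queries to either the wrapped optimizer or its storage and asserts the return type. A minimal standalone sketch of that forwarding pattern follows; `Forwarder` is a hypothetical stand-in, and EAGO's `Incremental` additionally handles bridging, caching, and incremental loading.

```julia
# Minimal sketch of the attribute-forwarding wrapper pattern (hypothetical type).
using MathOptInterface
const MOI = MathOptInterface

struct Forwarder{S<:MOI.AbstractOptimizer} <: MOI.AbstractOptimizer
    optimizer::S
end

# Solver attributes go to the wrapped optimizer; asserting the return type
# keeps downstream code type stable.
MOI.get(d::Forwarder, ::MOI.TerminationStatus) =
    MOI.get(d.optimizer, MOI.TerminationStatus())::MOI.TerminationStatusCode
MOI.get(d::Forwarder, ::MOI.ObjectiveValue) =
    MOI.get(d.optimizer, MOI.ObjectiveValue())::Float64

MOI.optimize!(d::Forwarder) = MOI.optimize!(d.optimizer)
```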
+################################################################################ """ $(TYPEDEF) diff --git a/src/eago_optimizer/types/node_bb.jl b/src/eago_optimizer/types/node_bb.jl index 06811e83..af4db207 100644 --- a/src/eago_optimizer/types/node_bb.jl +++ b/src/eago_optimizer/types/node_bb.jl @@ -1,17 +1,18 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/node_bb.jl -# Defines storage for a node in the B&B tree & utilities functions -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_optimizer/types/node_bb.jl +# Defines storage for a node in the B&B tree and utilities functions. +################################################################################ @enum(BranchDirection, BD_NONE, BD_NEG, BD_POS) -# Used internally to set & get variables in full problem space or just branch variables +# Used internally to set and get variables in full problem space or just branch variables struct FullVar end struct BranchVar end Base.Broadcast.broadcastable(d::FullVar) = Ref(d) @@ -65,7 +66,7 @@ Base.copy(x::NodeBB) = NodeBB(copy(x.lower_variable_bounds), copy(x.upper_variab x.continuous, x.lower_bound, x.upper_bound, x.depth, x.cont_depth, x.id, x.branch_direction, x.last_branch, x.branch_extent) -# using alternative name as to not interfere with ordering... +# Using alternative name as to not interfere with ordering function uninitialized(x::NodeBB) flag = isempty(x.lower_variable_bounds) flag &= isempty(x.upper_variable_bounds) @@ -93,7 +94,7 @@ end @inline depth(x::NodeBB) = x.depth @inline cont_depth(x::NodeBB) = x.cont_depth -# Iterations Functions +# Iterations functions Base.isless(x::NodeBB, y::NodeBB) = x.lower_bound < y.lower_bound Base.length(x::NodeBB) = length(x.lower_variable_bounds) function Base.isempty(x::NodeBB) diff --git a/src/eago_optimizer/utilities.jl b/src/eago_optimizer/utilities.jl index a314c16a..a48402ca 100644 --- a/src/eago_optimizer/utilities.jl +++ b/src/eago_optimizer/utilities.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). 
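The `Base.isless` definition on nodes in `node_bb.jl` above is what lets the branch-and-bound routine select the node with the least lower bound next. A toy illustration of that idiom follows; `ToyNode` is hypothetical and far simpler than `NodeBB`.

```julia
# Toy illustration of ordering B&B nodes by lower bound (NodeBB also stores
# variable bounds, depth, branch history, and more).
struct ToyNode
    lower_bound::Float64
    upper_bound::Float64
end
Base.isless(x::ToyNode, y::ToyNode) = x.lower_bound < y.lower_bound

stack = [ToyNode(3.0, 9.0), ToyNode(1.0, 4.0), ToyNode(2.0, 7.0)]
best = minimum(stack)   # the node with the least lower bound is explored next
```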
+################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_optimizer/unsafe_utilities.jl # Inbounds and non-allocating versions of simple Julia functions. -############################################################################# +################################################################################ """ $(TYPEDSIGNATURES) diff --git a/src/eago_script/codetransformation.jl b/src/eago_script/codetransformation.jl index 406c527a..d3976833 100644 --- a/src/eago_script/codetransformation.jl +++ b/src/eago_script/codetransformation.jl @@ -1,7 +1,6 @@ -#= -Temporary copy of resources from experimental code transformation package -https://github.com/perrutquist/CodeTransformation.jl until it becomes tagged. -=# +# Temporary copy of resources from experimental code transformation package +# https://github.com/perrutquist/CodeTransformation.jl until it becomes tagged. + import Core: SimpleVector, svec, CodeInfo import Base: uncompressed_ast, unwrap_unionall @@ -12,7 +11,7 @@ jl_method_def(argdata::SimpleVector, ci::CodeInfo, mod::Module) = typevars(T::UnionAll) = (T.var, typevars(T.body)...) typevars(T::DataType) = () -@nospecialize # the below functions need not specialize on arguments +@nospecialize # The functions below need not specialize on arguments getmodule(F::Type{<:Function}) = F.name.mt.module getmodule(f::Function) = getmodule(typeof(f)) diff --git a/src/eago_script/loader.jl b/src/eago_script/loader.jl index a32c4ffb..6198cda0 100644 --- a/src/eago_script/loader.jl +++ b/src/eago_script/loader.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_script/loader.jl # Utilities used to load user-defined functions into JuMP tapes. 
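The loader introduced above turns a forward-recorded tape, in which each node lists its children, into the parent-indexed node list used by JuMP/`MOI.Nonlinear`. A toy version of that breadth-first inversion, assuming a plain tree and using hypothetical node types, is sketched below.

```julia
# Toy sketch of converting child links to parent indices via BFS (assumes a
# tree; EAGO's tape_to_list additionally rebuilds MOINL.Node types and values).
struct ToyTapeNode
    op::Symbol
    children::Vector{Int}   # indices into the tape, recorded forward
end

function to_parent_indexed(tape::Vector{ToyTapeNode})
    root = length(tape)                 # the last recorded node is the root
    out = [(tape[root].op, -1)]         # (op, parent) pairs; the root's parent is -1
    queue = [(root, 1)]                 # (tape index, index of parent in `out`)
    while !isempty(queue)
        (i, parent) = popfirst!(queue)
        for c in tape[i].children
            push!(out, (tape[c].op, parent))
            push!(queue, (c, length(out)))
        end
    end
    return out
end

# x * y + 1 recorded forward as: x, y, *, 1, +
tape = [ToyTapeNode(:x, Int[]), ToyTapeNode(:y, Int[]),
        ToyTapeNode(:*, [1, 2]), ToyTapeNode(:one, Int[]),
        ToyTapeNode(:+, [3, 4])]
to_parent_indexed(tape)   # [(:+, -1), (:*, 1), (:one, 1), (:x, 2), (:y, 2)]
```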
-############################################################################# +################################################################################ """ """ @@ -17,7 +18,7 @@ function tape_to_list(tape::Tape) new_nds = MOINL.Node[MOINL.Node(last_node.type, last_node.index, -1)] queue = Tuple{Int,Int}[(len, -1)] - parent_dict = Dict{Int,Int}(len => 1) # starting node is 1 + parent_dict = Dict{Int,Int}(len => 1) # Starting node is 1 node_count = 1 while !isempty(queue) @@ -66,7 +67,7 @@ function remove_subexpr_children!(expr::MOINL.Expression) active_node.type !== MOINL.NODE_VARIABLE && active_node.type !== MOINL.NODE_VALUE) @inbounds children_idx = nzrange(adj, node_num) - if (length(children_idx) > 0) # has any children + if (length(children_idx) > 0) # Has any children for child in children_idx @inbounds idx = children_arr[child] @inbounds cn = nd[idx] @@ -129,7 +130,7 @@ function udf_loader!(x::AbstractOptimizer) nlp_model = evaluator.m.nlp_model user_registry = nlp_model.operators - # extracts tape from multivariate udf functions and creates subexpressions + # Extracts tape from multivariate udf functions and creates subexpressions multi_op_eval = user_registry.multivariate_operator_evaluator for mul_eval in multi_op_eval tape = trace_script(mul_eval.f, mul_eval.len) @@ -143,8 +144,8 @@ function udf_loader!(x::AbstractOptimizer) add_subexpr_from_tape!(tape, nlp_model) end - # replaces references in expressions to udfs with reference to subexpr - # and remove any children of subexpressions (since subexpressions are terminal nodes) + # Replaces references in expressions to udfs with reference to subexpr + # and removes any children of subexpressions (since subexpressions are terminal nodes) nlexpr = nlp_model.expressions nlexpr_count = length(nlexpr) for i = 1:nlexpr_count @@ -169,13 +170,13 @@ function udf_loader!(x::AbstractOptimizer) x.presolve_flatten_flag && flatten_expression!(constr.terms, parameter_values) end - # void previously defined udfs + # Void previously defined udfs nlp_model.operators = OperatorRegistry() evaluator.m.nlp_model = nlp_model evaluator.eval_objective_timer = 0.0 x._nlp_data = NLPBlockData(x._nlp_data.constraint_bounds, evaluator, x._nlp_data.has_objective) - # reinitialize evaluator + # Reinitialize evaluator init_feat = Symbol[:Grad, :Hess] num_nlp_constraints = length(x._nlp_data.constraint_bounds) num_nlp_constraints > 0 && push!(init_feat, :Jac) diff --git a/src/eago_script/patterns.jl b/src/eago_script/patterns.jl index 884fa971..89ecc151 100644 --- a/src/eago_script/patterns.jl +++ b/src/eago_script/patterns.jl @@ -1,15 +1,16 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. 
+# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_script/patterns.jl # Patterns to use in transormations that flatten expressions. -############################################################################# +################################################################################ -# (1) register log(a^x) = x*log(a) DONE +# (1) Register log(a^x) = x*log(a) DONE src_nds = Dict{Int, Template_Node}(1 => Template_Node(:op, :log), 2 => Template_Node(:op, :^), 3 => Template_Node(:num, :a; check = a -> a >= 0.0), @@ -24,7 +25,7 @@ dest_dag = [4 => 3, 3 => 1, 2 => 1] dest = Template_Graph(dest_nds, dest_dag) register_substitution!(src, dest) -# (2) register exp(x)*exp(y) -> exp(x+y) DONE +# (2) Register exp(x)*exp(y) -> exp(x+y) DONE src_nds = Dict{Int, Template_Node}(1 => Template_Node(:op, :*), 2 => Template_Node(:op, :exp), 3 => Template_Node(:op, :exp), @@ -40,7 +41,7 @@ dest_dag = [4 => 2, 3 => 2, 2 => 1] dest = Template_Graph(dest_nds, dest_dag) register_substitution!(src, dest) -# (3) register (a^{x})^{b} = (a^{b})^{x} # DONE +# (3) Register (a^{x})^{b} = (a^{b})^{x} DONE src_nds = Dict{Int, Template_Node}(1 => Template_Node(:op, :^), 2 => Template_Node(:op, :^), 3 => Template_Node(:num, :b), @@ -57,7 +58,7 @@ dest_dag = [5 => 2, 4 => 2, 3 => 1, 2 => 1] dest = Template_Graph(dest_nds, dest_dag) register_substitution!(src, dest) -# (4) register (x^{a})^{b} = x^{(ab)} +# (4) Register (x^{a})^{b} = x^{(ab)} #= src_nds = Dict{Int, Template_Node}(1 => Template_Node(:op, :^), 2 => Template_Node(:op, :^), @@ -76,7 +77,7 @@ dest = Template_Graph(dest_nds, dest_dag) register_substitution!(src, dest) =# -# (5) register a^{\log(x)} = x^{\log(a)} DONE +# (5) Register a^{\log(x)} = x^{\log(a)} DONE src_nds = Dict{Int, Template_Node}(1 => Template_Node(:op, :^), 2 => Template_Node(:num, :a), 3 => Template_Node(:op, :log), @@ -91,7 +92,7 @@ dest_dag = [4 => 3, 3 => 1, 2 => 1] dest = Template_Graph(dest_nds, dest_dag) register_substitution!(src, dest) -# (6) register \log(xy) = \log(x) + \log(y) DONE +# (6) Register \log(xy) = \log(x) + \log(y) DONE src_nds = Dict{Int, Template_Node}(1 => Template_Node(:op, :log), 2 => Template_Node(:op, :*), 3 => Template_Node(:expr, :x), @@ -107,7 +108,7 @@ dest_dag = [5 => 3, 4 => 2, 3 => 1, 2 => 1] dest = Template_Graph(dest_nds, dest_dag) register_substitution!(src, dest) -# (7) register \log(x/y) = \log(x) - \log(y) DONE +# (7) Register \log(x/y) = \log(x) - \log(y) DONE src_nds = Dict{Int, Template_Node}(1 => Template_Node(:op, :log), 2 => Template_Node(:op, :/), 3 => Template_Node(:expr, :x), diff --git a/src/eago_script/script.jl b/src/eago_script/script.jl index e6f49ec8..81f7cf08 100644 --- a/src/eago_script/script.jl +++ b/src/eago_script/script.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). 
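The template registrations above encode algebraic identities (for example `exp(x)*exp(y) = exp(x+y)`) that flatten expressions before relaxation. The toy rewrite below applies the same identity to plain Julia `Expr` trees; it is an illustration only and shares nothing with the `Template_Graph` machinery beyond the identity itself.

```julia
# Toy rewrite of exp(x)*exp(y) => exp(x + y) on Julia Expr trees.
is_exp(e) = e isa Expr && e.head == :call && e.args[1] == :exp

function flatten_exp_product(e)
    e isa Expr || return e
    args = map(flatten_exp_product, e.args)          # rewrite bottom-up
    if e.head == :call && args[1] == :* && length(args) == 3 &&
       is_exp(args[2]) && is_exp(args[3])
        return :(exp($(args[2].args[2]) + $(args[3].args[2])))
    end
    return Expr(e.head, args...)
end

flatten_exp_product(:(exp(x) * exp(y) + 1))   # :(exp(x + y) + 1)
```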
+################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_script/script.jl # A module used to manipulate script function inputs. -############################################################################# +################################################################################ module Script diff --git a/src/eago_script/scrubber.jl b/src/eago_script/scrubber.jl index e45d6a40..275e2777 100644 --- a/src/eago_script/scrubber.jl +++ b/src/eago_script/scrubber.jl @@ -1,14 +1,15 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_script/scrubber.jl # A context used to "scrub" type specific storage, assertions, and other # language features from a user-defined function. -############################################################################# +################################################################################ @context ScrubCtx @@ -38,7 +39,7 @@ function Cassette.overdub(ctx::ScrubCtx, ::typeof(vcat), A...) return vR end -# scrub type assertion from definition of udf +# Scrub type assertion from definition of udf function generated_scrubbed_method(f::Function, n::Int, inplace = false) ci = code_lowered(f) @assert(length(ci) == 1) @@ -85,7 +86,7 @@ Applies scrub to every user-defined function in the a `MOI.NLPBlockData` structu """ function scrub!(d::MOI.NLPBlockData) error("Function not updated for newest version of EAGO. Please submit an issue.") - # scrub multivariant + # Scrub multivariant user_ops = d.user_operators mvop_num = length(user_ops.multivariate_operator_evaluator) for i in 1:mvop_num @@ -99,7 +100,7 @@ function scrub!(d::MOI.NLPBlockData) user_ops.multivariate_operator_evaluator[i] = evalr end - # scrub univariants + # Scrub univariants svop_num = length(user_ops.univariate_operator_f) for i in 1:svop_num @@ -118,6 +119,6 @@ function scrub!(d::MOI.NLPBlockData) user_ops.univariate_operator_fprimeprime[i] = fnew end - # reassign evaluator + # Reassign evaluator d.user_operators = user_ops end \ No newline at end of file diff --git a/src/eago_script/substitute.jl b/src/eago_script/substitute.jl index 31987d43..1e4ce572 100644 --- a/src/eago_script/substitute.jl +++ b/src/eago_script/substitute.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_script/substitute.jl # Utilities that are used to substitute expressions in the computational tape. -############################################################################# +################################################################################ """ Template_Node @@ -68,11 +69,9 @@ const DAG_SUBSTITUTIONS = Template_Graph[] const DAG_SPDICT = Dict{Int,Int}[] const DAG_LENGTHS = Int[0,0] -#= -conventions for substition, the expression to be checked always appears at key 1 -in the Template_Graph and operations are ordered from low value to high value left to right -so if 1 is a -, and 4 => 1, 3 => 1 then the expression is 4 - 3 -=# +# Conventions for substition. The expression to be checked always appears at key 1 +# in the Template_Graph and operations are ordered from lowest to highest value, from +# left to right, so if 1 is a -, and 4 => 1, 3 => 1 then the expression is 4 - 3 """ register_substitution! @@ -165,8 +164,8 @@ function is_match(pattern::Template_Graph, indx::Int, nd::Vector{MOINL.Node}, da pat_children_arr = rowvals(pattern_adj) dag_children_arr = rowvals(dag_adj) - # do a breadth first search with paired template, nd data, - # if any pair of children fail then + # Do a breadth first search with paired template and nd data. 
+ # If any pair of children fail, then pindx_initial = 1 queue = Tuple{Int,Int}[(pindx_initial, indx)] while (~isempty(queue) && (match_flag == true)) @@ -230,10 +229,8 @@ function find_match(indx::Int, nd::Vector{MOINL.Node}, adj::SparseMatrixCSC{Bool return flag, pattern_number, match_dict end -#= -Takes a template node and makes the appropriate JuMP node, takes the parent index, -number of child for a pattern element, constant storage vector and it's length -=# +# Takes a template node and makes the appropriate JuMP node, takes the parent index, +# number of child for a pattern element, constant storage vector and it's length function op_node_to_dag!(x::Template_Node, parent::Int, child_len::Int) multivariate_operator_to_id = Dict{Symbol,Int}() for i in 1:length(DEFAULT_MULTIVARIATE_OPERATORS) @@ -260,10 +257,10 @@ function bfs_expr_add!(new_nds::Vector{MOINL.Node}, node_count::Int, num_prt::In queue = Tuple{Int,Int}[(expr_loc, num_prt)] inner_node_count = node_count while ~isempty(queue) - (node_num, prior_prt) = popfirst!(queue) # pop node - @inbounds active_node = nd[node_num] # store node + (node_num, prior_prt) = popfirst!(queue) # Pop node + @inbounds active_node = nd[node_num] # Store node new_node = MOINL.Node(active_node.type, active_node.index, prior_prt) - inner_node_count += 1 # update node count + inner_node_count += 1 # Update node count @inbounds parent_dict[num_prt] = inner_node_count push!(new_nds, new_node) if (active_node.type !== MOINL.NODE_SUBEXPRESSION && @@ -282,7 +279,7 @@ function bfs_expr_add!(new_nds::Vector{MOINL.Node}, node_count::Int, num_prt::In inner_node_count end -# we assume a tree structure, so if we don't load child nodes, +# We assume a tree structure, so if we don't load child nodes, # then they are effectively deleted function substitute!(match_num::Int, node_num::Int, prior_prt::Int, nd::Vector{MOINL.Node}, const_list::Vector{Float64}, const_len::Int, node_count::Int, @@ -340,7 +337,7 @@ function substitute!(match_num::Int, node_num::Int, prior_prt::Int, nd::Vector{M return inner_node_count, const_len end -# searchs through expression breadth first search that short-cirucits +# Searchs through expression breadth first search that short-cirucits """ flatten_expression! diff --git a/src/eago_script/tracer.jl b/src/eago_script/tracer.jl index 8e015140..249c6038 100644 --- a/src/eago_script/tracer.jl +++ b/src/eago_script/tracer.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_script/tracer.jl # Utilities for tracing a user-defined function. 
-############################################################################# +################################################################################ struct NodeInfo nodetype::MOINL.NodeType @@ -16,8 +17,8 @@ struct NodeInfo end children(x::NodeInfo) = x.children -# convert method assumes that the children connectivity using in Tracer has -# has been converted to an area hold a single parent value as is used in JuMP +# Convert method assumes that the children connectivity used in tracer has +# has been converted to an area. Holds a single parent value as is used in JuMP. convert(::Type{MOINL.Node}, x::NodeInfo) = MOINL.Node(x.type, x.index, x.children[1]) # val = 1..n corresponds to 1..n variable @@ -36,9 +37,9 @@ iterate(v::SetTraceSto, i=1) = (length(v.storage) < i ? nothing : (v.storage[i], export Tape -# JuMP convention is to store child from function... function call = -1, -# next highest call is has parent 1 and so on... This is a forward tape recording -# architecture, so we'll store children then invert... +# JuMP convention is to store child from function (function call = -1). +# Next highest call is has parent 1 and so on. This is a forward tape recording +# architecture, so we'll store children then invert. mutable struct Tape nd::Vector{NodeInfo} const_values::Vector{Float64} @@ -89,7 +90,7 @@ for i in 1:length(comparison_operators) comparison_operator_to_id[comparison_operators[i]] = i end -# defines primitives for MOINL.NODE_CALL_UNIVARIATE operators +# Defines primitives for MOINL.NODE_CALL_UNIVARIATE operators for i in (abs, sin, cos, tan, sec, csc, cot, asin, acos, atan, asec, acsc, acot, sinh, cosh, tanh, asinh, acosh, atanh, sech, asech, csch, acsch, coth, acoth, sqrt, log, log2, log10, log1p, exp, exp2, expm1, @@ -102,7 +103,7 @@ for i in (abs, sin, cos, tan, sec, csc, cot, asin, acos, atan, asec, acsc, @eval Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::Real) = ($i)(x) end -# defines primitives for bivariate MOINL.NODE_CALL_MULTIVARIATE operators (NEED TO ADD ^) +# Defines primitives for bivariate MOINL.NODE_CALL_MULTIVARIATE operators (NEED TO ADD ^) for i in (+, -, *, ^, /) # TODO ADD :max, :min id = multivariate_operator_to_id[Symbol(i)] @eval function Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace, y::SetTrace) @@ -122,7 +123,7 @@ for i in (+, -, *, ^, /) # TODO ADD :max, :min @eval Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::Real, y::Real) = ($i)(x,y) end -# defines primitives for bivariate MOINL.NODE_COMPARISON operators +# Defines primitives for bivariate MOINL.NODE_COMPARISON operators for i in (>,<,==,>=,<=) id = comparison_operator_to_id[Symbol(i)] @eval function Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace, y::SetTrace) @@ -142,16 +143,16 @@ for i in (>,<,==,>=,<=) @eval Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::Real, y::Real) = ($i)(x, y) end -# define primitives for associative terms +# Defines primitives for associative terms Cassette.overdub(ctx::TraceCtx, ::typeof(*), x, y) = afoldl(x, y) Cassette.overdub(ctx::TraceCtx, ::typeof(afoldl), x, y, z) = afoldl(x, y, z) Cassette.overdub(ctx::TraceCtx, ::typeof(afoldl), a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p...) = afoldl(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p...) 
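The `Cassette.overdub` primitives above intercept arithmetic on traced values and append tape nodes, so an unmodified user-defined function can be recorded simply by calling it on traced inputs. The same idea can be sketched without Cassette via ordinary operator overloading; `TraceVal` and `ToyTape` below are hypothetical stand-ins for EAGO's `SetTrace` and `Tape`.

```julia
# Operator-overloading sketch of tape recording (hypothetical; EAGO instead
# defines Cassette.overdub methods on SetTrace values).
struct TraceVal
    index::Int             # index of the node holding this value on the tape
end

mutable struct ToyTape
    nodes::Vector{Tuple{Symbol,Vector{Int}}}   # (operation, child node indices)
end

function record!(tape::ToyTape, op::Symbol, args::TraceVal...)
    push!(tape.nodes, (op, [a.index for a in args]))
    return TraceVal(length(tape.nodes))
end

const TAPE = ToyTape([(:variable, Int[]), (:variable, Int[])])  # x -> node 1, y -> node 2
Base.:+(a::TraceVal, b::TraceVal) = record!(TAPE, :+, a, b)
Base.:*(a::TraceVal, b::TraceVal) = record!(TAPE, :*, a, b)

f(x, y) = x * y + x        # an ordinary user-defined function
f(TraceVal(1), TraceVal(2))
TAPE.nodes                 # [(:variable, []), (:variable, []), (:*, [1, 2]), (:+, [3, 1])]
```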
-# conversion +# Conversion Cassette.overdub(ctx::TraceCtx, ::typeof(float), x) = x Cassette.overdub(ctx::TraceCtx, ::typeof(AbstractFloat), x) = x -# primitive for array access +# Primitive for array access Cassette.overdub(ctx::TraceCtx, ::typeof(getindex), A::Array, i::Int) = getindex(A,i) Cassette.overdub(ctx::TraceCtx, ::typeof(getindex), A::SetTraceSto, i::Int) = getindex(A,i) @@ -162,7 +163,7 @@ function Cassette.overdub(ctx::TraceCtx, ::typeof(typeassert), x::Real, type::Ty return x end -# prehook for debugging mainly +# Prehook for debugging, mainly function Cassette.prehook(ctx::TraceCtx, f::Function, args...) #println(f, args) end diff --git a/src/eago_semiinfinite/nonconvex_algorithms/sip_hybrid.jl b/src/eago_semiinfinite/nonconvex_algorithms/sip_hybrid.jl index 478a998a..d18d313f 100644 --- a/src/eago_semiinfinite/nonconvex_algorithms/sip_hybrid.jl +++ b/src/eago_semiinfinite/nonconvex_algorithms/sip_hybrid.jl @@ -1,17 +1,18 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_semiinfinite/algorithms/sip_hybrid.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_semiinfinite/nonconvex_algorithms/sip_hybrid.jl # FUTURE FEATURE... NOT CURRENTLY FUNCTIONAL # Defines the SIP-hybrid algorithm which implements Algorithm #2 of Djelassi, # Hatim, and Alexander Mitsos. "A hybrid discretization algorithm with guaranteed # feasibility for the global solution of semi-infinite programs." # Journal of Global Optimization 68.2 (2017): 227-253. 
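The SIP algorithms in these files (`SIPHybrid` here, with `SIPRes` and `SIPResRev` following) are all driven through `sip_solve`. The usage sketch below is adapted from EAGO's semi-infinite programming documentation; the exact signature, keyword names, and bound values are assumptions based on that documentation, while the `xsol` and `upper_bound` result fields are the ones referenced in the routines themselves.

```julia
# Usage sketch (signature assumed from EAGO's SIP documentation).
using EAGO

f(x)       = (1.0/3.0)*x[1]^2 + x[2]^2 + x[1]/2.0                    # objective in x
gSIP(x, p) = (1.0 - x[1]^2*p[1]^2)^2 - x[1]*p[1]^2 - x[2]^2 + x[2]   # must hold for all p

x_l = [-1000.0, -1000.0];  x_u = [1000.0, 1000.0]   # decision variable bounds
p_l = [0.0];               p_u = [1.0]              # uncertainty set bounds

result = sip_solve(SIPRes(), x_l, x_u, p_l, p_u, f, Any[gSIP], abs_tolerance = 1E-3)
result.xsol, result.upper_bound
```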
-############################################################################# +################################################################################ """ SIPHybrid @@ -46,10 +47,10 @@ function sip_solve!(t::ExtensionType, alg::SIPHybrid, buffer::SIPSubResult, prob verb = prob.verbosity - # begin main solution loop + # Begin main solution loop @label main_iteration - # solve lower bounding problem and check feasibility + # Solve lower bounding problem and check feasibility sip_bnd!(t, alg, LowerProblem(), buffer, result, prob, cb) result.lower_bound = buffer.lbd.obj_val if !buffer.lbd.feas @@ -59,7 +60,7 @@ function sip_solve!(t::ExtensionType, alg::SIPHybrid, buffer::SIPSubResult, prob end print_summary!(LowerProblem(), verb, buffer) - # solve inner program and update lower discretization set + # Solve inner program and update lower discretization set is_llp1_nonpositive = true for i = 1:prob.nSIP sip_llp!(t, alg, LowerLevel1(), result, buffer, prob, cb, i) @@ -76,7 +77,7 @@ function sip_solve!(t::ExtensionType, alg::SIPHybrid, buffer::SIPSubResult, prob end end - # if the lower problem is feasible then it's solution is the optimal value + # If the lower problem is feasible, then it's solution is the optimal value if is_llp1_nonpositive result.upper_bound = buffer.lbd.obj_val result.xsol .= buffer.lbd.sol @@ -85,7 +86,7 @@ function sip_solve!(t::ExtensionType, alg::SIPHybrid, buffer::SIPSubResult, prob end - # solve upper bounding problem, if feasible solve lower level problem, + # Solve upper bounding problem, if feasible solve lower level problem, # and potentially update upper discretization set @label upper_problem sip_bnd!(t, alg, UpperProblem(), buffer, result, prob, cb) @@ -115,7 +116,7 @@ function sip_solve!(t::ExtensionType, alg::SIPHybrid, buffer::SIPSubResult, prob end check_convergence(result, prob.abs_tolerance, verb) && @goto main_end - # solve restriction problem updating lower and upper bound as appropriate + # Solve restriction problem updating lower and upper bound as appropriate @label res_problem sip_res!(t, alg, buffer, result, prob, cb) if buffer.res.obj_bnd < 0 @@ -157,7 +158,7 @@ function sip_solve!(t::ExtensionType, alg::SIPHybrid, buffer::SIPSubResult, prob @goto main_iteration end - # print iteration information and advance + # Print iteration information and advance print_int!(verb, prob, result, buffer.r_g) result.iteration_number += 1 result.iteration_number < prob.iteration_limit && @goto main_iteration diff --git a/src/eago_semiinfinite/nonconvex_algorithms/sip_res.jl b/src/eago_semiinfinite/nonconvex_algorithms/sip_res.jl index 66e3a506..a43ca071 100644 --- a/src/eago_semiinfinite/nonconvex_algorithms/sip_res.jl +++ b/src/eago_semiinfinite/nonconvex_algorithms/sip_res.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). 
+################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_semiinfinite/algorithms/sip_hybrid.jl -# Defines the SIP-res algorithm which implements Algorithm #1 of XXX. -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_semiinfinite/nonconvex_algorithms/sip_res.jl +# Defines the SIP-res algorithm which implements Algorithm #1 of XXX. #TODO: Find the citation for this. +################################################################################ """ SIPRes @@ -33,11 +34,11 @@ function sip_solve!(t::ExtensionType, alg::SIPRes, buffer::SIPSubResult, prob::S verb = prob.verbosity - # initializes solution + # Initializes solution @label main_iteration check_convergence(result, prob.abs_tolerance, verb) && @goto main_end - # solve lower bounding problem and check feasibility + # Solve lower bounding problem and check feasibility sip_bnd!(t, alg, LowerProblem(), buffer, result, prob, cb) result.lower_bound = buffer.lbd.obj_val if !buffer.lbd.feas @@ -47,7 +48,7 @@ function sip_solve!(t::ExtensionType, alg::SIPRes, buffer::SIPSubResult, prob::S end print_summary!(LowerProblem(), verb, buffer) - # solve inner program and update lower discretization set + # Solve inner program and update lower discretization set is_llp1_nonpositive = true for i = 1:prob.nSIP sip_llp!(t, alg, LowerLevel1(), result, buffer, prob, cb, i) @@ -61,7 +62,7 @@ function sip_solve!(t::ExtensionType, alg::SIPRes, buffer::SIPSubResult, prob::S end end - # if the lower problem is feasible then it's solution is the optimal value + # If the lower problem is feasible, then its solution is the optimal value if is_llp1_nonpositive result.upper_bound = buffer.lbd.obj_val result.xsol .= buffer.lbd.sol @@ -69,7 +70,7 @@ function sip_solve!(t::ExtensionType, alg::SIPRes, buffer::SIPSubResult, prob::S @goto main_end end - # solve upper bounding problem, if feasible solve lower level problem, + # Solve upper bounding problem; if feasible, solve lower level problem, # and potentially update upper discretization set sip_bnd!(t, alg, UpperProblem(), buffer, result, prob, cb) print_summary!(UpperProblem(), verb, buffer) @@ -97,9 +98,9 @@ function sip_solve!(t::ExtensionType, alg::SIPRes, buffer::SIPSubResult, prob::S buffer.eps_g ./= buffer.r_g end - @show "FINISHED ONE ITERATION..." + println("FINISHED ONE ITERATION") - # print iteration information and advance + # Print iteration information and advance print_int!(verb, prob, result, buffer.r_g) result.iteration_number += 1 result.iteration_number < prob.iteration_limit && @goto main_iteration diff --git a/src/eago_semiinfinite/nonconvex_algorithms/sip_res_rev.jl b/src/eago_semiinfinite/nonconvex_algorithms/sip_res_rev.jl index ceac5a7e..23a27bb8 100644 --- a/src/eago_semiinfinite/nonconvex_algorithms/sip_res_rev.jl +++ b/src/eago_semiinfinite/nonconvex_algorithms/sip_res_rev.jl @@ -1,16 +1,17 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber.
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_semiinfinite/algorithms/sip_res_rev.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_semiinfinite/nonconvex_algorithms/sip_res_rev.jl # Defines the revised SIP-res algorithm which implements Algorithm #1 of Djelassi, # Hatim, and Alexander Mitsos. "A hybrid discretization algorithm with guaranteed # feasibility for the global solution of semi-infinite programs." # Journal of Global Optimization 68.2 (2017): 227-253. -############################################################################# +################################################################################ """ SIPResRev @@ -47,11 +48,11 @@ function sip_solve!(t::ExtensionType, alg::SIPResRev, buffer::SIPSubResult, prob verb = prob.verbosity - # initializes solution + # Initializes solution @label main_iteration check_convergence(result, prob.abs_tolerance, verb) && @goto main_end - # solve lower bounding problem and check feasibility + # Solve lower bounding problem and check feasibility sip_bnd!(t, alg, LowerProblem(), buffer, result, prob, cb) result.lower_bound = buffer.lbd.obj_val if !buffer.lbd.feas @@ -61,7 +62,7 @@ function sip_solve!(t::ExtensionType, alg::SIPResRev, buffer::SIPSubResult, prob end print_summary!(LowerProblem(), verb, buffer) - # solve inner program and update lower discretization set + # Solve inner program and update lower discretization set is_llp1_nonpositive = true for i = 1:prob.nSIP sip_llp!(t, alg, LowerLevel1(), result, buffer, prob, cb, i) @@ -78,7 +79,7 @@ function sip_solve!(t::ExtensionType, alg::SIPResRev, buffer::SIPSubResult, prob end end - # if the lower problem is feasible then it's solution is the optimal value + # If the lower problem is feasible, then its solution is the optimal value if is_llp1_nonpositive result.upper_bound = buffer.lbd.obj_val result.xsol .= buffer.lbd.sol @@ -86,7 +87,7 @@ function sip_solve!(t::ExtensionType, alg::SIPResRev, buffer::SIPSubResult, prob @goto main_end end - # solve upper bounding problem, if feasible solve lower level problem, + # Solve upper bounding problem; if feasible, solve lower level problem, # and potentially update upper discretization set sip_bnd!(t, alg, UpperProblem(), buffer, result, prob, cb) print_summary!(UpperProblem(), verb, buffer) @@ -114,7 +115,7 @@ function sip_solve!(t::ExtensionType, alg::SIPResRev, buffer::SIPSubResult, prob buffer.eps_g ./= buffer.r_g end - # print iteration information and advance + # Print iteration information and advance print_int!(verb, prob, result, buffer.r_g) result.iteration_number += 1 result.iteration_number < prob.iteration_limit && @goto main_iteration diff --git a/src/eago_semiinfinite/semiinfinite.jl b/src/eago_semiinfinite/semiinfinite.jl index fa2d670d..f4598a3d 100644 ---
a/src/eago_semiinfinite/semiinfinite.jl +++ b/src/eago_semiinfinite/semiinfinite.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ # src/eago_semiinfinite/semi_infinite.jl -# Defines high level interface for SIP routines. -############################################################################# +# Defines a high-level interface for SIP routines. +################################################################################ include("types.jl") include("subproblems.jl") diff --git a/src/eago_semiinfinite/subproblems.jl b/src/eago_semiinfinite/subproblems.jl index 0022f4c5..4d9e3ad8 100644 --- a/src/eago_semiinfinite/subproblems.jl +++ b/src/eago_semiinfinite/subproblems.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_semiinfinite/sub_problems.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_semiinfinite/subproblems.jl # Defines utilities for generic SIP subroutines. -############################################################################# +################################################################################ """ build_model @@ -115,11 +116,11 @@ function sip_llp!(t::DefaultExt, alg::A, s::S, result::SIPResult, sr::SIPSubResult, prob::SIPProblem, cb::SIPCallback, i::Int64, tol::Float64 = -Inf) where {A <: AbstractSIPAlgo, S <: AbstractSubproblemType} - # build the model + # Build the model m, p = build_model(t, alg, s, prob) set_tolerance!(t, alg, s, m, sr, i) - # define the objective + # Define the objective xbar = get_xbar(t, alg, s, sr) g(p...) 
= cb.gSIP[i](xbar, p) register(m, :g, prob.np, g, autodiff=true) @@ -134,16 +135,16 @@ function sip_llp!(t::DefaultExt, alg::A, s::S, result::SIPResult, end set_nonlinear_objective(m, MOI.MAX_SENSE, nl_obj) - # add uncertainty constraints + # Add uncertainty constraints add_uncertainty_constraint!(m, prob) - # optimize model and check status + # Optimize model and check status JuMP.optimize!(m) tstatus = JuMP.termination_status(m) rstatus = JuMP.primal_status(m) feas = llp_check(prob.local_solver, tstatus, rstatus) - # fill buffer with subproblem result info + # Fill buffer with subproblem result info psol = JuMP.value.(p) load!(s, sr, feas, JuMP.objective_value(m), JuMP.objective_bound(m), psol) result.solution_time += MOI.get(m, MOI.SolveTimeSec()) @@ -172,7 +173,7 @@ function sip_bnd!(t::DefaultExt, alg::A, s::S, sr::SIPSubResult, result::SIPResu prob::SIPProblem, cb::SIPCallback) where {A <: AbstractSIPAlgo, S <: AbstractSubproblemType} - # create JuMP model + # Create JuMP model m, x = build_model(t, alg, s, prob) for i = 1:prob.nSIP @@ -191,7 +192,7 @@ function sip_bnd!(t::DefaultExt, alg::A, s::S, sr::SIPSubResult, result::SIPResu end end - # define the objective + # Define the objective obj(x...) = cb.f(x) register(m, :obj, prob.nx, obj, autodiff=true) if isone(prob.nx) @@ -205,13 +206,13 @@ function sip_bnd!(t::DefaultExt, alg::A, s::S, sr::SIPSubResult, result::SIPResu end set_nonlinear_objective(m, MOI.MIN_SENSE, nl_obj) - # optimize model and check status + # Optimize model and check status JuMP.optimize!(m) t_status = JuMP.termination_status(m) r_status = JuMP.primal_status(m) feas = bnd_check(prob.local_solver, t_status, r_status) - # fill buffer with subproblem result info + # Fill buffer with subproblem result info load!(s, sr, feas, JuMP.objective_value(m), JuMP.objective_bound(m), JuMP.value.(x)) result.solution_time += MOI.get(m, MOI.SolveTimeSec()) @@ -232,12 +233,12 @@ end function sip_res!(t::DefaultExt, alg::A, sr::SIPSubResult, result::SIPResult, prob::SIPProblem, cb::SIPCallback) where {A <: AbstractSIPAlgo} - # create JuMP model & variables + # Create JuMP model and variables s = ResProblem() m, x = build_model(t, alg, s, prob) @variable(m, η) - # add discretized semi-infinite constraint + # Add discretized semi-infinite constraint for i = 1:prob.nSIP disc_set = get_disc_set(t, alg, s, sr, i) for j = 1:length(disc_set) @@ -257,7 +258,7 @@ function sip_res!(t::DefaultExt, alg::A, sr::SIPSubResult, result::SIPResult, end end - # add epigraph reformulated objective + # Add epigraph reformulated objective obj(x...) 
= cb.f(x) register(m, :f, prob.nx, obj, autodiff=true) if isfinite(sr.fRes) @@ -273,23 +274,23 @@ function sip_res!(t::DefaultExt, alg::A, sr::SIPSubResult, result::SIPResult, JuMP.add_nonlinear_constraint(m, :($nl_obj + $(sr.fRes) <= 0)) end - # define the objective + # Define the objective @objective(m, Min, -η) - # optimize model and check status + # Optimize model and check status JuMP.optimize!(m) t_status = JuMP.termination_status(m) r_status = JuMP.primal_status(m) feas = llp_check(prob.local_solver, t_status, r_status) - # fill buffer with subproblem result info + # Fill buffer with subproblem result info load!(s, sr, feas, JuMP.objective_value(m), JuMP.objective_bound(m), JuMP.value.(x)) result.solution_time += MOI.get(m, MOI.SolveTimeSec()) return nothing end -# get optimizer for use in subproblems +# Get optimizer for use in subproblems function get_sip_optimizer(t::DefaultExt, alg::A, s::S) where {A<:AbstractSIPAlgo, S<:AbstractSubproblemType} return EAGO.Optimizer end @@ -314,12 +315,12 @@ function print_int!(verb::Int, prob::SIPProblem, result::SIPResult, r::Float64) if (prob.verbosity == 1 || prob.verbosity == 2) - # prints header line every hdr_intv times + # Prints header line every hdr_intv times if (mod(k, prob.header_interval) == 0 || k == 1) println("| Iteration | Lower Bound | Upper Bound | r | Gap | Ratio |") end - # prints iteration summary every prnt_intv times + # Prints iteration summary every prnt_intv times if mod(k, prob.print_interval) == 0 print_str = "| " diff --git a/src/eago_semiinfinite/types.jl b/src/eago_semiinfinite/types.jl index a3367cf8..491225b5 100644 --- a/src/eago_semiinfinite/types.jl +++ b/src/eago_semiinfinite/types.jl @@ -1,13 +1,14 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# +# Copyright (c) 2018: Matthew Wilhelm, Robert Gottlieb, Dimitri Alston, +# Matthew Stuber, and the University of Connecticut (UConn). +# This code is licensed under the MIT license (see LICENSE.md for full details). +################################################################################ # EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# # src/eago_semiinfinite/types.jl +# A development environment for robust and global optimization. +# https://github.com/PSORLab/EAGO.jl +################################################################################ +# src/eago_semiinfinite/types.jl # Defines intermediate types used by SIP routines. -############################################################################# +################################################################################ abstract type AbstractSIPAlgo end @@ -29,7 +30,7 @@ end """ SIPResult -Structure storing the results of the SIPres algorithm. +Structure storing the results of the `SIPRes` algorithm. 
""" mutable struct SIPResult iteration_number::Int64 diff --git a/src/subsolvers/clp.jl b/src/subsolvers/clp.jl index 8a3ffedd..19e33179 100644 --- a/src/subsolvers/clp.jl +++ b/src/subsolvers/clp.jl @@ -3,7 +3,7 @@ _is_incremental(::Type{Clp.Optimizer}) = false function set_default_config!(ext::ExtensionType, d::GlobalOptimizer, m::Clp.Optimizer, local_solver::Bool) MOI.set(m, MOI.RawOptimizerAttribute("PrimalTolerance"), _absolute_tol(d)*1E-2) MOI.set(m, MOI.RawOptimizerAttribute("DualTolerance"), _absolute_tol(d)*1E-2) - MOI.set(m, MOI.RawOptimizerAttribute("DualObjectiveLimit"), 1e308) + MOI.set(m, MOI.RawOptimizerAttribute("DualObjectiveLimit"), 1E308) MOI.set(m, MOI.RawOptimizerAttribute("MaximumIterations"), 2147483647) MOI.set(m, MOI.RawOptimizerAttribute("PresolveType"), 0) MOI.set(m, MOI.RawOptimizerAttribute("SolveType"), 5) diff --git a/test/domain_reduction.jl b/test/domain_reduction.jl index dd721ec6..4736f0b3 100644 --- a/test/domain_reduction.jl +++ b/test/domain_reduction.jl @@ -13,6 +13,6 @@ @test lvb[1] == 1.0; @test uvb[1] == 1.04 @test lvb[2] == 1.0; @test uvb[2] == 4.0 @test lvb[3] == 1.0; @test uvb[3] == 3.0 - # lvb[3] doesn't tighten since dbbt assumes variables aren't fixed... + # lvb[3] doesn't tighten since dbbt assumes variables aren't fixed @test isapprox(lvb[4], 3.33333333, atol= 1E-5); @test uvb[4] == 4.0 end \ No newline at end of file diff --git a/test/minlp_tests.jl b/test/minlp_tests.jl index 6acbbc54..a70f2d39 100644 --- a/test/minlp_tests.jl +++ b/test/minlp_tests.jl @@ -3,7 +3,7 @@ using MINLPTests solver = JuMP.optimizer_with_attributes(EAGO.Optimizer, "relative_tolerance" => 1E-9) minlp_nlp_exclude = String[ - "001_010", # Unbounded box, check solution bad if not gradient-based.... + "001_010", # Unbounded box, check solution bad if not gradient-based "002_010", # Unbounded box # "003_010", # "003_011", @@ -27,10 +27,10 @@ MINLPTests.test_nlp(solver, exclude = minlp_nlp_exclude, minlp_nlp_cvx_exclude = String[ "001_010", - "001_011", # convex quadratic objective... (linear unbounded...) + "001_011", # Convex quadratic objective (linear unbounded) "002_010", - "002_011", # unbounded linear problem & convex quadratic objective - "101_010", # convex quadratic nl constraints... + "002_011", # Unbounded linear problem and convex quadratic objective + "101_010", # Convex quadratic nonlinear constraints "101_011", "101_012", "102_010", @@ -48,8 +48,8 @@ minlp_nlp_cvx_exclude = String[ "105_011", "105_012", "105_013", - "106_010", # simple bounded domain - "106_011", # + "106_010", # Simple bounded domain + "106_011", "107_010", "107_011", "107_012", @@ -88,15 +88,15 @@ MINLPTests.test_nlp_cvx(solver, exclude = minlp_nlp_cvx_exclude, primal_target = MINLPTests.PRIMAL_TARGET_GLOBAL) minlp_nlp_mi_exclude = String[ - "001_010", # no box constraints + "001_010", # No box constraints "002_010", - "003_010", # TODO: Fix 003_010 - 003_016 + "003_010", # TODO: Fix 003_010 - 003_016 "003_011", # FAIL "003_012", # FAIL "003_013", # FAIL - "003_014", # FAIL Never converges... 
- "003_015", #FAIL + "003_014", # FAIL (ever converges) + "003_015", # FAIL "003_016", "004_010", @@ -106,7 +106,7 @@ minlp_nlp_mi_exclude = String[ "005_011", # \ operator not in JuMP "006_010", "007_010", - "007_020" # no way of specifying + "007_020" # No way of specifying ] MINLPTests.test_nlp_mi(solver, exclude = minlp_nlp_mi_exclude, objective_tol = 1E-3, primal_tol = 1E-3, diff --git a/test/moit_tests.jl b/test/moit_tests.jl index b43f344c..c22c0b53 100644 --- a/test/moit_tests.jl +++ b/test/moit_tests.jl @@ -7,7 +7,7 @@ using Test const MOI = MathOptInterface const OPTIMIZER = MOI.instantiate(MOI.OptimizerWithAttributes(EAGO.Optimizer, MOI.Silent() => true)) const BRIDGED = MOI.instantiate(MOI.OptimizerWithAttributes(EAGO.Optimizer, MOI.Silent() => true), with_bridge_type = Float64) -const CONFIG = MOI.Test.Config(atol = 1e-3, rtol = 1e-3, optimal_status = MOI.OPTIMAL, +const CONFIG = MOI.Test.Config(atol = 1E-3, rtol = 1E-3, optimal_status = MOI.OPTIMAL, exclude = Any[MOI.DualObjectiveValue, MOI.ConstraintBasisStatus, MOI.VariableName, MOI.ConstraintName, MOI.delete, MOI.ConstraintDual, MOI.ListOfModelAttributesSet, MOI.add_constrained_variables]) @@ -63,18 +63,18 @@ function test_runtests() "test_linear_integration_delete_variables", # EAGO Exclusions to Resolve (by adding conic support later and fixing twice solve issues) - "conic_NormOneCone_VectorAffineFunction", - "conic_NormOneCone_VectorOfVariables", - "conic_NormInfinityCone_VectorOfVariables", - "conic_NormInfinityCone_VectorAffineFunction", + "test_conic_NormOneCone_VectorAffineFunction", + "test_conic_NormOneCone_VectorOfVariables", + "test_conic_NormInfinityCone_VectorOfVariables", + "test_conic_NormInfinityCone_VectorAffineFunction", "test_conic_NormInfinityCone_3", - "test_conic_NormOneCone", - "test_conic_linear_VectorOfVariables", - "conic_linear_VectorAffineFunction", - "conic_linear_VectorAffineFunction_2", + r"^test_conic_NormOneCone$", + r"^test_conic_linear_VectorOfVariables$", + r"^test_conic_linear_VectorAffineFunction$", + "test_conic_linear_VectorAffineFunction_2", - "linear_integer_solve_twice", - "linear_integration", + "test_linear_integer_solve_twice", + r"^test_linear_integration$", "test_quadratic_SecondOrderCone_basic", "test_quadratic_constraint_GreaterThan", @@ -105,14 +105,12 @@ function test_runtests() "test_linear_Semicontinuous_integration", "test_linear_Semiinteger_integration", - # Remove these tests after MOI 1.17.2 - "test_objective_ObjectiveSense_in_ListOfModelAttributesSet", - "test_objective_ScalarAffineFunction_in_ListOfModelAttributesSet", - "test_objective_ScalarQuadraticFunction_in_ListOfModelAttributesSet", - "test_objective_VariableIndex_in_ListOfModelAttributesSet" + # EAGO B&B handles interval bounds internally + "test_linear_open_intervals", + "test_linear_variable_open_intervals" ], - exclude_tests_after = v"0.10.5") + exclude_tests_after = v"1.18.0") end end diff --git a/test/optimizer.jl b/test/optimizer.jl index d42c683b..dcb8ed17 100644 --- a/test/optimizer.jl +++ b/test/optimizer.jl @@ -39,7 +39,7 @@ end model = EAGO.Optimizer() - # Termination Status Code Checks + # Termination status code checks model._termination_status_code = MOI.OPTIMAL status = @inferred MOI.get(model, MOI.TerminationStatus()) @test status == MOI.OPTIMAL @@ -703,11 +703,20 @@ end end @testset "Display Testset" begin - m = EAGO.Optimizer() - MOI.set(m, MOI.RawOptimizerAttribute("verbosity"), 2) - @test_nowarn EAGO.print_solution!(m._global_optimizer) - @test_nowarn 
EAGO.print_results!(m._global_optimizer, true) - @test_nowarn EAGO.print_results!(m._global_optimizer, false) - @test_nowarn EAGO.print_iteration!(m._global_optimizer) - @test_nowarn EAGO.print_node!(m._global_optimizer) + x = EAGO.Optimizer() + m = x._global_optimizer + d = EAGO._relaxed_optimizer(m) + MOI.set(x, MOI.RawOptimizerAttribute("verbosity"), 3) + MOI.set(x, MOI.RawOptimizerAttribute("log_on"), true) + END_STATES = instances(EAGO.GlobalEndState) + # Test print statement for every possible end state + for i in eachindex(END_STATES) + m._end_state = END_STATES[i] + @test_nowarn EAGO.print_solution!(m) + end + @test_nowarn EAGO.print_results!(m, true) + @test_nowarn EAGO.print_results!(m, false) + @test_nowarn EAGO.print_iteration!(m) + @test_nowarn EAGO.print_node!(m) + @test_nowarn EAGO.print_problem_summary!(d, "Display Test") end \ No newline at end of file