diff --git a/CMakeLists.txt b/CMakeLists.txt
index 53b2bf60..6f91eb00 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -138,12 +138,17 @@ if (WITH_THREADLOCAL AND NOT DEFINED HAVE_THREAD_LOCAL_STORAGE)
 endif ()
 
 if (NLOPT_CXX OR NLOPT_PYTHON OR NLOPT_GUILE OR NLOPT_OCTAVE)
-  check_cxx_symbol_exists (_LIBCPP_VERSION string SYSTEM_HAS_LIBCPP)
-  if (SYSTEM_HAS_LIBCPP)
+  check_cxx_symbol_exists (__cplusplus ciso646 SYSTEM_HAS_CXX)
+  if (SYSTEM_HAS_CXX)
     check_cxx_compiler_flag ("-std=c++11" SUPPORTS_STDCXX11)
     if (SUPPORTS_STDCXX11)
       set (CMAKE_CXX_FLAGS "-std=c++11 ${CMAKE_CXX_FLAGS}")
+      if (NLOPT_CXX)
+        set (NLOPT_CXX11 ON)
+      endif ()
     endif ()
+  else ()
+    message (FATAL_ERROR "The compiler doesn't support C++.")
   endif ()
 endif ()
 
@@ -202,6 +207,10 @@ if (NLOPT_CXX)
   list (APPEND NLOPT_SOURCES src/algs/stogo/global.cc src/algs/stogo/linalg.cc src/algs/stogo/local.cc src/algs/stogo/stogo.cc src/algs/stogo/tools.cc
         src/algs/stogo/global.h src/algs/stogo/linalg.h src/algs/stogo/local.h src/algs/stogo/stogo_config.h src/algs/stogo/stogo.h src/algs/stogo/tools.h)
 endif ()
+if (NLOPT_CXX11)
+  list (APPEND NLOPT_SOURCES src/algs/ags/data_types.hpp src/algs/ags/evolvent.hpp src/algs/ags/evolvent.cc src/algs/ags/solver.hpp src/algs/ags/solver.cc
+        src/algs/ags/local_optimizer.hpp src/algs/ags/local_optimizer.cc src/algs/ags/ags.h src/algs/ags/ags.cc)
+endif ()
 
 install (FILES ${NLOPT_HEADERS} DESTINATION ${RELATIVE_INSTALL_INCLUDE_DIR})
 
@@ -219,6 +228,7 @@ target_include_directories (${nlopt_lib} PRIVATE
   ${PROJECT_BINARY_DIR}/src/api
   ${PROJECT_BINARY_DIR}
   src/algs/stogo
+  src/algs/ags
   src/util
   src/algs/direct
   src/algs/cdirect
diff --git a/doc/docs/NLopt_Algorithms.md b/doc/docs/NLopt_Algorithms.md
index ba5ff25d..59fddffe 100644
--- a/doc/docs/NLopt_Algorithms.md
+++ b/doc/docs/NLopt_Algorithms.md
@@ -31,7 +31,7 @@ Better yet, run some algorithm for a really long time until the minimum *f*
 Global optimization
 -------------------
 
-All of the global-optimization algorithms currently require you to specify bound constraints on all the optimization parameters. Of these algorithms, only ISRES and ORIG_DIRECT support nonlinear inequality constraints, and only ISRES supports nonlinear equality constraints. (However, any of them can be applied to nonlinearly constrained problems by combining them with the [augmented Lagrangian method](#Augmented_Lagrangian_algorithm.md) below.)
+All of the global-optimization algorithms currently require you to specify bound constraints on all the optimization parameters. Of these algorithms, only ISRES, AGS, and ORIG_DIRECT support nonlinear inequality constraints, and only ISRES supports nonlinear equality constraints. (However, any of them can be applied to nonlinearly constrained problems by combining them with the [augmented Lagrangian method](#Augmented_Lagrangian_algorithm.md) below.)
 
 **Something you should consider** is that, after running the global optimization, it is often worthwhile to then use the global optimum as a starting point for a local optimization to "polish" the optimum to a greater accuracy. (Many of the global optimization algorithms devote more effort to searching the global parameter space than in finding the precise position of the local optimum accurately.)
 
@@ -123,6 +123,24 @@ Some references on StoGO are:
 
 Only bound-constrained problems are supported by this algorithm.
 
+### AGS
+
+This algorithm is adapted from [this repo](https://github.com/sovrasov/glob_search_nlp_solver).
+AGS can handle arbitrary objectives and nonlinear inequality constraints; bound constraints are required. To guarantee convergence, the objective and the constraints should satisfy the Lipschitz condition on the specified hyperrectangle.
+AGS is derivative-free and employs the Hilbert curve to reduce the multidimensional problem to a univariate one. The algorithm divides the univariate space into intervals, generating new points by using posterior probabilities. On each trial, AGS evaluates the constraints one by one; if some constraint is violated at the trial point, the remaining ones are not evaluated. If all constraints are satisfied, i.e. the trial point is feasible, AGS evaluates the objective. Thus the constraints (except the first one) and the objective can be partially undefined inside the search hyperrectangle. The current implementation of AGS does not support vector constraints.
+
+Limitations of machine arithmetic make it impossible to build a tight approximation of the Hilbert curve when the space dimension is greater than 5, so this implementation of AGS is restricted in that sense: it supports up to 10 dimensions, but the method can stop prematurely in 6 or more dimensions.
+
+AGS, like StoGO, is written in C++, but it requires C++11. If the library is built with [C++](NLopt_Installation.md) enabled and the compiler supports C++11, AGS is built too.
+
+AGS is specified within NLopt by `NLOPT_GN_AGS`. Additional parameters of AGS that are not adjustable through the common NLopt interface are declared and described in `ags.h`. An example of solving a constrained problem is also given in the AGS source folder.
+
+References:
+- Sergeyev Ya.D., Markin D.L.: An algorithm for solving global optimization problems with nonlinear constraints, Journal of Global Optimization, 7(4), pp. 407–419, 1995
+- Strongin R.G., Sergeyev Ya.D., 2000. Global optimization with non-convex constraints. Sequential and parallel algorithms. Kluwer Academic Publishers, Dordrecht.
+- Gergel V. and Lebedev I.: Heterogeneous Parallel Computations for Solving Global Optimization Problems. Procedia Computer Science 66, pp. 53–62 (2015)
+- [Implementation](https://github.com/sovrasov/multicriterial-go) of AGS for constrained multi-objective problems.
+
 ### ISRES (Improved Stochastic Ranking Evolution Strategy)
 
 This is my implementation of the "Improved Stochastic Ranking Evolution Strategy" (ISRES) algorithm for nonlinearly-constrained global optimization (or at least semi-global; although it has heuristics to escape local optima, I'm not aware of a convergence proof), based on the method described in:
diff --git a/nlopt_config.h.in b/nlopt_config.h.in
index c5dd6858..464c60b3 100644
--- a/nlopt_config.h.in
+++ b/nlopt_config.h.in
@@ -1,16 +1,16 @@
 /*==============================================================================
 # NLOPT CMake configuration file
-# 
-# NLopt is a free/open-source library for nonlinear optimization, providing 
-# a common interface for a number of different free optimization routines 
-# available online as well as original implementations of various other 
+#
+# NLopt is a free/open-source library for nonlinear optimization, providing
+# a common interface for a number of different free optimization routines
+# available online as well as original implementations of various other
 # algorithms
-# WEBSITE: http://ab-initio.mit.edu/wiki/index.php/NLopt 
+# WEBSITE: http://ab-initio.mit.edu/wiki/index.php/NLopt
 # AUTHOR: Steven G.
Johnson
 #
 # This config.cmake.h.in file was created to compile NLOPT with the CMAKE utility.
 # Benoit Scherrer, 2010 CRL, Harvard Medical School
-# Copyright (c) 2008-2009 Children's Hospital Boston 
+# Copyright (c) 2008-2009 Children's Hospital Boston
 #
 # Minor changes to the source was applied to make possible the compilation with
 # Cmake under Linux/Win32
@@ -150,6 +150,9 @@
 /* Define if compiled including C++-based routines */
 #cmakedefine NLOPT_CXX
 
+/* Define if compiled including C++11-based routines */
+#cmakedefine NLOPT_CXX11
+
 /* Define to empty if `const' does not conform to ANSI C. */
 #undef const
diff --git a/src/algs/ags/ags.cc b/src/algs/ags/ags.cc
new file mode 100644
index 00000000..63f3a3ee
--- /dev/null
+++ b/src/algs/ags/ags.cc
@@ -0,0 +1,108 @@
+// A C-callable front-end to the AGS global-optimization library.
+// -- Vladislav Sovrasov
+
+#include "ags.h"
+#include "solver.hpp"
+
+#include <iostream>
+#include <cstring>
+#include <vector>
+
+double ags_eps = 0;
+double ags_r = 3;
+double eps_res = 0.001;
+unsigned evolvent_density = 12;
+int ags_refine_loc = 0;
+int ags_verbose = 0;
+
+int ags_minimize(unsigned n, nlopt_func func, void *data, unsigned m, nlopt_constraint *fc,
+                 double *x, double *minf, const double *l, const double *u, nlopt_stopping *stop)
+{
+  int ret_code = NLOPT_SUCCESS;
+
+  if (n > ags::solverMaxDim)
+    return NLOPT_INVALID_ARGS;
+  if (m != nlopt_count_constraints(m, fc) || m > ags::solverMaxConstraints)
+    return NLOPT_INVALID_ARGS;
+
+  if (ags_verbose && n > 5)
+    std::cout << "Warning: AGS is unstable when dimension > 5" << std::endl;
+
+  std::vector<double> lb(l, l + n);
+  std::vector<double> ub(u, u + n);
+  std::vector<ags::NLPSolver::FuncPtr> functions;
+  for (unsigned i = 0; i < m; i++)
+  {
+    if (fc[i].m != 1)
+      return NLOPT_INVALID_ARGS;
+    functions.push_back([fc, data, n, i](const double* x) {
+      double val = 0;
+      nlopt_eval_constraint(&val, NULL, &fc[i], n, x);
+      return val;
+    });
+  }
+  functions.push_back([func, data, n, stop](const double* x) {
+    ++ *(stop->nevals_p);
+    return func(n, x, NULL, data);});
+
+  ags::SolverParameters params;
+  params.r = ags_r;
+  params.itersLimit = stop->maxeval != 0 ?
+                        stop->maxeval : 5000;
+  params.eps = ags_eps;
+  params.evolventDensity = evolvent_density;
+  params.epsR = eps_res;
+  params.stopVal = stop->minf_max;
+  params.refineSolution = (bool)ags_refine_loc;
+
+  ags::NLPSolver solver;
+  solver.SetParameters(params);
+  solver.SetProblem(functions, lb, ub);
+
+  ags::Trial optPoint;
+  try
+  {
+    auto external_stop_func = [stop, &ret_code]() {
+      if (nlopt_stop_time(stop)) {
+        ret_code = NLOPT_MAXTIME_REACHED;
+        return true;
+      }
+      else return false;
+    };
+    optPoint = solver.Solve(external_stop_func);
+  }
+  catch (const std::exception& exp)
+  {
+    std::cerr << "AGS internal error: " << std::string(exp.what()) << std::endl;
+    return NLOPT_FAILURE;
+  }
+
+  if (ags_verbose)
+  {
+    auto calcCounters = solver.GetCalculationsStatistics();
+    auto holderConstEstimations = solver.GetHolderConstantsEstimations();
+
+    std::cout << std::string(20, '-') << "AGS statistics: " << std::string(20, '-') << std::endl;
+    for (size_t i = 0; i < calcCounters.size() - 1; i++)
+      std::cout << "Number of calculations of constraint # " << i << ": " << calcCounters[i] << "\n";
+    std::cout << "Number of calculations of objective: " << calcCounters.back() << "\n";
+
+    for (size_t i = 0; i < holderConstEstimations.size() - 1; i++)
+      std::cout << "Estimation of Holder constant of function # " << i << ": " << holderConstEstimations[i] << "\n";
+    std::cout << "Estimation of Holder constant of objective: " << holderConstEstimations.back() << "\n";
+    if (optPoint.idx != (int)m)
+      std::cout << "Feasible point not found" << "\n";
+    std::cout << std::string(40, '-') << std::endl;
+  }
+
+  if ((int)m == optPoint.idx)
+  {
+    memcpy(x, optPoint.y, n*sizeof(x[0]));
+    *minf = optPoint.g[optPoint.idx];
+  }
+  else // feasible point not found
+    return NLOPT_FAILURE;
+
+  if (solver.GetCalculationsStatistics()[0] >= params.itersLimit)
+    return NLOPT_MAXEVAL_REACHED;
+
+  return ret_code;
+}
diff --git a/src/algs/ags/ags.h b/src/algs/ags/ags.h
new file mode 100644
index 00000000..809340a6
--- /dev/null
+++ b/src/algs/ags/ags.h
@@ -0,0 +1,31 @@
+/* A C-callable front-end to the AGS global-optimization library.
+   -- Vladislav Sovrasov */
+
+#ifndef AGS_H
+#define AGS_H
+
+#include "nlopt-util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// The algorithm supports 3 types of stopping criteria: stop by execution time, stop by objective value, and stop on exceeding the iteration limit.
+
+int ags_minimize(unsigned n, nlopt_func func, void *data, unsigned m, nlopt_constraint *fc,
+                 double *x, double *minf, const double *l, const double *u, nlopt_stopping *stop);
+
+extern double ags_eps; // method tolerance in the Holder metric on the 1d interval. A smaller value gives better search precision and a lower probability of an early stop.
+extern double ags_r; // reliability parameter. A higher value of r means slower convergence but a higher chance to catch the global minimum.
+extern double eps_res; // parameter which prevents the method from paying too much attention to constraints. Greater values of this parameter speed up convergence,
+// but the global minimum can be lost.
+extern unsigned evolvent_density; // density of the evolvent. By default the density is 2^-12 on the hypercube [0,1]^N,
+// which means that the maximum search accuracy is 2^-12. If the search hypercube is large, the density can be increased accordingly to achieve better accuracy.
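+
+/* Illustrative usage sketch (not part of the API): these globals are read once
+   per ags_minimize() call, so they can be set before starting the optimization, e.g.
+       ags_r = 4;          // more reliable, but slower convergence
+       ags_refine_loc = 1; // polish the result with the built-in local optimizer
+   The values above are arbitrary examples, not recommendations. */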
+extern int ags_refine_loc; // refine the final optimum using the built-in local optimizer
+extern int ags_verbose; // print additional info
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/algs/ags/data_types.hpp b/src/algs/ags/data_types.hpp
new file mode 100644
index 00000000..9b29d8e2
--- /dev/null
+++ b/src/algs/ags/data_types.hpp
@@ -0,0 +1,75 @@
+/*
+Copyright (C) 2018 Sovrasov V. - All Rights Reserved
+ * You may use, distribute and modify this code under the
+ * terms of the MIT license.
+ * You should have received a copy of the MIT license with
+ * this file. If not visit https://opensource.org/licenses/MIT
+*/
+#pragma once
+
+#include <stdexcept>
+#include <string>
+
+#define NLP_SOLVER_ERROR(msg) throw std::runtime_error(std::string(msg))
+#define NLP_SOLVER_ASSERT(expr, msg) if(!(expr)) NLP_SOLVER_ERROR(msg)
+
+namespace ags
+{
+
+const unsigned solverMaxDim = 10;
+const unsigned solverMaxConstraints = 10;
+
+template <class fptype>
+class IGOProblem
+{
+public:
+  ~IGOProblem() {}
+
+  virtual fptype Calculate(const fptype* y, int fNumber) const = 0;
+  virtual int GetConstraintsNumber() const = 0;
+  virtual int GetDimension() const = 0;
+  virtual void GetBounds(fptype* left, fptype* right) const = 0;
+  virtual int GetOptimumPoint(fptype* y) const = 0;
+  virtual fptype GetOptimumValue() const = 0;
+};
+
+struct Trial
+{
+  double x;
+  double y[solverMaxDim];
+  double g[solverMaxConstraints + 1];
+  int idx;
+  Trial() {}
+  Trial(double _x) : x(_x) {}
+};
+
+struct Interval
+{
+  Trial pl;
+  Trial pr;
+  double R;
+  double delta;
+  Interval() {}
+  Interval(const Trial& _pl, const Trial& _pr) : pl(_pl), pr(_pr) {}
+};
+
+struct CompareIntervals
+{
+  bool operator() (const Interval* i1, const Interval* i2) const
+  {
+    return i1->pl.x < i2->pl.x;
+  }
+};
+
+class CompareByR
+{
+public:
+  bool operator() (const Interval* i1, const Interval* i2) const
+  {
+    return i1->R < i2->R;
+  }
+};
+
+}
diff --git a/src/algs/ags/evolvent.cc b/src/algs/ags/evolvent.cc
new file mode 100644
index 00000000..b72cf86d
--- /dev/null
+++ b/src/algs/ags/evolvent.cc
@@ -0,0 +1,201 @@
+/*
+Copyright (C) 2018 Sovrasov V. - All Rights Reserved
+ * You may use, distribute and modify this code under the
+ * terms of the MIT license.
+ * You should have received a copy of the MIT license with
+ * this file.
If not visit https://opensource.org/licenses/MIT
+*/
+#include "evolvent.hpp"
+
+#include <cassert>
+#include <cmath>
+#include <cstring>
+
+using namespace ags;
+
+void mapd(double x, int m, double* y, int n, int key = 1); /* map x to y */
+
+Evolvent::Evolvent()
+{
+  mIsInitialized = false;
+}
+
+Evolvent::~Evolvent()
+{
+}
+
+Evolvent::Evolvent(int dimension, int tightness, const double* lb, const double* ub)
+{
+  assert(tightness > 2);
+
+  mDimension = dimension;
+  mTightness = tightness;
+
+  mShiftScalars.resize(mDimension);
+  mRho.resize(mDimension);
+  for (int i = 0; i < mDimension; i++)
+  {
+    mRho[i] = ub[i] - lb[i];
+    mShiftScalars[i] = 0.5*(lb[i] + ub[i]);
+  }
+
+  mIsInitialized = true;
+}
+
+void Evolvent::TransformToStandardCube(const double *y, double *z)
+{
+  for (int i = 0; i < mDimension; i++)
+    z[i] = (y[i] - mShiftScalars[i]) / mRho[i];
+}
+
+void Evolvent::TransformToSearchDomain(const double *y, double *z)
+{
+  for (int i = 0; i < mDimension; i++)
+    z[i] = mRho[i]*y[i] + mShiftScalars[i];
+}
+
+void Evolvent::GetImage(double x, double y[])
+{
+  if (mDimension != 1)
+    mapd(x, mTightness, y, mDimension);
+  else
+    y[0] = x - 0.5;
+
+  TransformToSearchDomain(y, y);
+}
+
+void mapd(double x, int m, double* y, int n, int key)
+{
+  /* mapping y(x) : 1 - center, 2 - line, 3 - node */
+  // use key = 1
+
+  int n1, nexp, l, iq, iu[10], iv[10];
+  double d, mne, dd, dr;//,tmp;
+  double p, r;
+  int iw[11];
+  int it, is = 0, i, j, k;
+  void node(int is, int n1, int nexp, int& l, int& iq, int iu[], int iv[]);
+
+  p = 0.0;
+  n1 = n - 1;
+  for (nexp = 1, i = 0; i 2) {
+    dr = mne / nexp;
+    dr = dr - fmod(dr, 1.0);
+    //dr=(dr>0)?floor(dr):ceil(dr);
+    dd = mne - dr;
+    dr = d*dd;
+    dd = dr - fmod(dr, 1.0);
+    //dd=(dr>0)?floor(dr):ceil(dr);
+    dr = dd + (dd - 1) / (nexp - 1);
+    dd = dr - fmod(dr, 1.0);
+    //dd=(dr>0)?floor(dr):ceil(dr);
+    d = dd*(1. / (mne - 1.0));
+  }
+
+  for (j = 0; j0) || ((iq == 0) && (is == 0))) k = l;
+    else if (iq<0) k = (it == n1) ? 0 : n1;
+    r = r*0.5;
+    it = l;
+    for (i = 0; i= iff) {
+      if ((is == iff) && (is != 1)) { l = i; iq = -1; }
+      is = is - iff;
+      k2 = 1;
+    }
+    else {
+      k2 = -1;
+      if ((is == (iff - 1)) && (is != 0)) { l = i; iq = 1; }
+    }
+    j = -k1*k2;
+    iv[i] = j;
+    iu[i] = j;
+    k1 = k2;
+  }
+  iv[l] = iv[l] * iq;
+  iv[n1] = -iv[n1];
+}
diff --git a/src/algs/ags/evolvent.hpp b/src/algs/ags/evolvent.hpp
new file mode 100644
index 00000000..84b80c42
--- /dev/null
+++ b/src/algs/ags/evolvent.hpp
@@ -0,0 +1,37 @@
+/*
+Copyright (C) 2018 Sovrasov V. - All Rights Reserved
+ * You may use, distribute and modify this code under the
+ * terms of the MIT license.
+ * You should have received a copy of the MIT license with
+ * this file. If not visit https://opensource.org/licenses/MIT
+*/
+#pragma once
+
+#include <vector>
+
+namespace ags
+{
+
+class Evolvent
+{
+protected:
+  int mDimension;
+  int mTightness;
+
+  std::vector<double> mRho;
+  std::vector<double> mShiftScalars;
+
+  void TransformToStandardCube(const double *y, double *z);
+  void TransformToSearchDomain(const double *y, double *z);
+
+  bool mIsInitialized;
+
+public:
+  Evolvent();
+  Evolvent(int dimension, int tightness, const double* lb, const double* ub);
+  ~Evolvent();
+
+  virtual void GetImage(double x, double y[]);
+};
+
+}
diff --git a/src/algs/ags/local_optimizer.cc b/src/algs/ags/local_optimizer.cc
new file mode 100644
index 00000000..6c5f2538
--- /dev/null
+++ b/src/algs/ags/local_optimizer.cc
@@ -0,0 +1,135 @@
+/*
+Copyright (C) 2018 Sovrasov V.
- All Rights Reserved
+ * You may use, distribute and modify this code under the
+ * terms of the MIT license.
+ * You should have received a copy of the MIT license with
+ * this file. If not visit https://opensource.org/licenses/MIT
+*/
+#include "local_optimizer.hpp"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+using namespace ags;
+
+#define MAX_LOCAL_ITERATIONS_NUMBER 20
+
+void HookeJeevesOptimizer::SetParameters(double eps, double step, double stepMult)
+{
+  NLP_SOLVER_ASSERT(eps > 0 && step > 0 && stepMult > 0, "Wrong parameters of the local optimizer");
+  mEps = eps;
+  mStep = step;
+  mStepMultiplier = stepMult;
+}
+
+Trial HookeJeevesOptimizer::Optimize(std::shared_ptr<IGOProblem<double>> problem,
+                                     const Trial& startPoint, std::vector<unsigned>& trialsCounters)
+{
+  mProblem = problem;
+  mStartPoint = startPoint;
+  mTrialsCounters = std::vector<unsigned>(mProblem->GetConstraintsNumber() + 1, 0);
+
+  int k = 0, i = 0;
+  bool needRestart = true;
+  double currentFValue, nextFValue;
+
+  while (i < MAX_LOCAL_ITERATIONS_NUMBER) {
+    i++;
+    if (needRestart) {
+      k = 0;
+      mCurrentPoint = mStartPoint;
+      mCurrentResearchDirection = mStartPoint;
+      currentFValue = ComputeObjective(mCurrentPoint.y);
+      needRestart = false;
+    }
+
+    std::swap(mPreviousResearchDirection, mCurrentResearchDirection);
+    mCurrentResearchDirection = mCurrentPoint;
+    nextFValue = MakeResearch(mCurrentResearchDirection.y);
+
+    if (currentFValue > nextFValue) {
+      DoStep();
+      k++;
+      currentFValue = nextFValue;
+    }
+    else if (mStep > mEps) {
+      if (k != 0)
+        std::swap(mStartPoint, mPreviousResearchDirection);
+      else
+        mStep /= mStepMultiplier;
+      needRestart = true;
+    }
+    else
+      break;
+  }
+
+  mPreviousResearchDirection.idx = 0;
+  while (mPreviousResearchDirection.idx < mProblem->GetConstraintsNumber())
+  {
+    mTrialsCounters[mPreviousResearchDirection.idx]++;
+    mPreviousResearchDirection.g[mPreviousResearchDirection.idx] =
+      mProblem->Calculate(mPreviousResearchDirection.y, mPreviousResearchDirection.idx);
+    if (mPreviousResearchDirection.g[mPreviousResearchDirection.idx] > 0)
+      break;
+    mPreviousResearchDirection.idx++;
+  }
+
+  if (mPreviousResearchDirection.idx == mProblem->GetConstraintsNumber())
+  {
+    mPreviousResearchDirection.g[mPreviousResearchDirection.idx] =
+      mProblem->Calculate(mPreviousResearchDirection.y, mPreviousResearchDirection.idx);
+    mTrialsCounters[mPreviousResearchDirection.idx]++;
+  }
+
+  for (size_t i = 0; i < mTrialsCounters.size(); i++)
+    trialsCounters[i] += mTrialsCounters[i];
+
+  return mPreviousResearchDirection;
+}
+
+void HookeJeevesOptimizer::DoStep()
+{
+  for (int i = 0; i < mProblem->GetDimension(); i++)
+    mCurrentPoint.y[i] = (1 + mStepMultiplier)*mCurrentResearchDirection.y[i] -
+      mStepMultiplier*mPreviousResearchDirection.y[i];
+}
+
+double HookeJeevesOptimizer::ComputeObjective(const double* x) const
+{
+  for (int i = 0; i <= mProblem->GetConstraintsNumber(); i++)
+  {
+    double value = mProblem->Calculate(x, i);
+    mTrialsCounters[i]++;
+    if (i < mProblem->GetConstraintsNumber() && value > 0)
+      return std::numeric_limits<double>::max();
+    else if (i == mProblem->GetConstraintsNumber())
+      return value;
+  }
+  return std::numeric_limits<double>::max();
+}
+
+double HookeJeevesOptimizer::MakeResearch(double* startPoint)
+{
+  double bestValue = ComputeObjective(startPoint);
+
+  for (int i = 0; i < mProblem->GetDimension(); i++)
+  {
+    startPoint[i] += mStep;
+    double rightFvalue = ComputeObjective(startPoint);
+
+    if (rightFvalue > bestValue)
+    {
+      startPoint[i] -= 2 * mStep;
+      double leftFValue = ComputeObjective(startPoint);
+      if (leftFValue > bestValue)
+        startPoint[i] += mStep;
+      else
+        bestValue = leftFValue;
+    }
+    else
+      bestValue = rightFvalue;
+  }
+
+  return bestValue;
+}
diff --git a/src/algs/ags/local_optimizer.hpp b/src/algs/ags/local_optimizer.hpp
new file mode 100644
index 00000000..03c45acb
--- /dev/null
+++ b/src/algs/ags/local_optimizer.hpp
@@ -0,0 +1,44 @@
+/*
+Copyright (C) 2018 Sovrasov V. - All Rights Reserved
+ * You may use, distribute and modify this code under the
+ * terms of the MIT license.
+ * You should have received a copy of the MIT license with
+ * this file. If not visit https://opensource.org/licenses/MIT
+*/
+#pragma once
+
+#include "data_types.hpp"
+
+#include <memory>
+#include <vector>
+
+namespace ags
+{
+
+class HookeJeevesOptimizer
+{
+private:
+  double mEps;
+  double mStep;
+  double mStepMultiplier;
+
+  mutable std::vector<unsigned> mTrialsCounters;
+
+  std::shared_ptr<IGOProblem<double>> mProblem;
+
+  Trial mCurrentPoint;
+  Trial mStartPoint;
+  Trial mCurrentResearchDirection;
+  Trial mPreviousResearchDirection;
+
+  void DoStep();
+  double ComputeObjective(const double* x) const;
+  double MakeResearch(double*);
+
+public:
+  void SetParameters(double eps, double step, double stepMult);
+  Trial Optimize(std::shared_ptr<IGOProblem<double>> problem,
+                 const Trial& startPoint, std::vector<unsigned>& trialsCounters);
+};
+
+}
diff --git a/src/algs/ags/solver.cc b/src/algs/ags/solver.cc
new file mode 100644
index 00000000..289592de
--- /dev/null
+++ b/src/algs/ags/solver.cc
@@ -0,0 +1,421 @@
+/*
+Copyright (C) 2018 Sovrasov V. - All Rights Reserved
+ * You may use, distribute and modify this code under the
+ * terms of the MIT license.
+ * You should have received a copy of the MIT license with
+ * this file. If not visit https://opensource.org/licenses/MIT
+*/
+#include "solver.hpp"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+using namespace ags;
+
+namespace
+{
+  const double zeroHLevel = 1e-12;
+
+  class ProblemInternal : public IGOProblem<double>
+  {
+  private:
+    std::vector<NLPSolver::FuncPtr> mFunctions;
+    std::vector<double> mLeftBound;
+    std::vector<double> mRightBound;
+
+    unsigned mDimension;
+    unsigned mConstraintsNumber;
+
+  public:
+    ProblemInternal(const std::vector<NLPSolver::FuncPtr>& functions,
+                    const std::vector<double>& leftBound, const std::vector<double>& rightBound)
+    {
+      mFunctions = functions;
+      mConstraintsNumber = mFunctions.size() - 1;
+      mDimension = leftBound.size();
+      mLeftBound = leftBound;
+      mRightBound = rightBound;
+    }
+
+    double Calculate(const double* y, int fNumber) const
+    {
+      return mFunctions[fNumber](y);
+    }
+    int GetConstraintsNumber() const
+    {
+      return mConstraintsNumber;
+    }
+    int GetDimension() const
+    {
+      return mDimension;
+    }
+    void GetBounds(double* left, double* right) const
+    {
+      for (size_t i = 0; i < mDimension; i++)
+      {
+        left[i] = mLeftBound[i];
+        right[i] = mRightBound[i];
+      }
+    }
+    int GetOptimumPoint(double* y) const { return 0; }
+    double GetOptimumValue() const { return 0; }
+  };
+}
+
+NLPSolver::NLPSolver() {}
+
+void NLPSolver::SetParameters(const SolverParameters& params)
+{
+  mParameters = params;
+}
+
+void NLPSolver::SetProblem(std::shared_ptr<IGOProblem<double>> problem)
+{
+  mProblem = problem;
+  NLP_SOLVER_ASSERT(mProblem->GetConstraintsNumber() <= (int)solverMaxConstraints,
+                    "Current implementation supports up to " + std::to_string(solverMaxConstraints) +
+                    " nonlinear inequality constraints");
+  InitLocalOptimizer();
+}
+
+void NLPSolver::SetProblem(const std::vector<FuncPtr>& functions,
+                           const std::vector<double>& leftBound, const std::vector<double>& rightBound)
+{
+  NLP_SOLVER_ASSERT(leftBound.size() == rightBound.size(), "Inconsistent dimensions of bounds");
+  NLP_SOLVER_ASSERT(leftBound.size() > 0, "Zero problem dimension");
+  mProblem =
+    std::make_shared<ProblemInternal>(functions, leftBound, rightBound);
+  NLP_SOLVER_ASSERT(mProblem->GetConstraintsNumber() <= (int)solverMaxConstraints,
+                    "Current implementation supports up to " + std::to_string(solverMaxConstraints) +
+                    " nonlinear inequality constraints");
+  InitLocalOptimizer();
+}
+
+std::vector<unsigned> NLPSolver::GetCalculationsStatistics() const
+{
+  return mCalculationsCounters;
+}
+
+std::vector<double> NLPSolver::GetHolderConstantsEstimations() const
+{
+  return mHEstimations;
+}
+
+void NLPSolver::InitLocalOptimizer()
+{
+  std::vector<double> leftBound(mProblem->GetDimension());
+  std::vector<double> rightBound(mProblem->GetDimension());
+  mProblem->GetBounds(leftBound.data(), rightBound.data());
+
+  double maxSize = 0;
+  for (size_t i = 0; i < leftBound.size(); i++)
+    maxSize = std::max(rightBound[i] - leftBound[i], maxSize);
+
+  NLP_SOLVER_ASSERT(maxSize > 0, "Empty search domain");
+
+  mLocalOptimizer.SetParameters(maxSize / 1000, maxSize / 100, 2);
+}
+
+void NLPSolver::InitDataStructures()
+{
+  double leftDomainBound[solverMaxDim], rightDomainBound[solverMaxDim];
+  mProblem->GetBounds(leftDomainBound, rightDomainBound);
+  mEvolvent = Evolvent(mProblem->GetDimension(), mParameters.evolventDensity,
+                       leftDomainBound, rightDomainBound);
+
+  mNextPoints.resize(mParameters.numPoints);
+  mOptimumEstimation.idx = -1;
+
+  mZEstimations.resize(mProblem->GetConstraintsNumber() + 1);
+  std::fill(mZEstimations.begin(), mZEstimations.end(),
+            std::numeric_limits<double>::max());
+  mNextIntervals.resize(mParameters.numPoints);
+  mHEstimations.resize(mProblem->GetConstraintsNumber() + 1);
+  std::fill(mHEstimations.begin(), mHEstimations.end(), 1.0);
+  mCalculationsCounters.resize(mProblem->GetConstraintsNumber() + 1);
+  std::fill(mCalculationsCounters.begin(), mCalculationsCounters.end(), 0);
+  mQueue = PriorityQueue();
+  mIterationsCounter = 0;
+  mMinDelta = std::numeric_limits<double>::max();
+  mMaxIdx = -1;
+}
+
+void NLPSolver::ClearDataStructures()
+{
+  for (const auto& ptr : mSearchInformation)
+    delete ptr;
+  mSearchInformation.clear();
+  mQueue = PriorityQueue();
+}
+
+Trial NLPSolver::Solve()
+{
+  return Solve([]() { return false; });
+}
+
+Trial NLPSolver::Solve(std::function<bool(void)> externalStopFunc)
+{
+  mNeedStop = false;
+  InitDataStructures();
+  FirstIteration();
+
+  do {
+    InsertIntervals();
+    EstimateOptimum();
+    if (mNeedRefillQueue || mQueue.size() < mParameters.numPoints)
+      RefillQueue();
+    CalculateNextPoints();
+    MakeTrials();
+    mNeedStop = mNeedStop || mMinDelta < mParameters.eps || externalStopFunc();
+    mIterationsCounter++;
+  } while (mIterationsCounter < mParameters.itersLimit && !mNeedStop);
+
+  ClearDataStructures();
+
+  if (mParameters.refineSolution && mOptimumEstimation.idx == mProblem->GetConstraintsNumber()) {
+    auto localTrial = mLocalOptimizer.Optimize(mProblem, mOptimumEstimation, mCalculationsCounters);
+    int idx = mOptimumEstimation.idx;
+    if (localTrial.idx == idx && localTrial.g[idx] < mOptimumEstimation.g[idx])
+      mOptimumEstimation = localTrial;
+  }
+
+  return mOptimumEstimation;
+}
+
+void NLPSolver::FirstIteration()
+{
+  Trial leftBound = Trial(0);
+  leftBound.idx = -1;
+  Trial rightBound = Trial(1.);
+  rightBound.idx = -1;
+
+  for (size_t i = 1; i <= mParameters.numPoints; i++)
+  {
+    mNextPoints[i - 1] = Trial((double)i / (mParameters.numPoints + 1));
+    mEvolvent.GetImage(mNextPoints[i - 1].x, mNextPoints[i - 1].y);
+  }
+
+  MakeTrials();
+  EstimateOptimum();
+
+  for (size_t i = 0; i <= mParameters.numPoints; i++)
+  {
+    Interval* pNewInterval;
+    if (i == 0)
+      pNewInterval = new Interval(leftBound,
+                                  mNextPoints[i]);
+    else if (i == mParameters.numPoints)
+      pNewInterval = new Interval(mNextPoints[i - 1], rightBound);
+    else
+      pNewInterval = new Interval(mNextPoints[i - 1], mNextPoints[i]);
+    pNewInterval->delta = pow(pNewInterval->pr.x - pNewInterval->pl.x,
+                              1. / mProblem->GetDimension());
+    mMinDelta = std::min(mMinDelta, pNewInterval->delta);
+    auto insRes = mSearchInformation.insert(pNewInterval);
+    UpdateAllH(insRes.first);
+  }
+  RefillQueue();
+  CalculateNextPoints();
+  MakeTrials();
+  mIterationsCounter += 2;
+}
+
+void NLPSolver::MakeTrials()
+{
+  for (size_t i = 0; i < mNextPoints.size(); i++)
+  {
+    int idx = 0;
+    while (idx < mProblem->GetConstraintsNumber())
+    {
+      mNextPoints[i].idx = idx;
+      double val = mProblem->Calculate(mNextPoints[i].y, idx);
+      mCalculationsCounters[idx]++;
+      mNextPoints[i].g[idx] = val;
+      if (val > 0)
+        break;
+      idx++;
+    }
+
+    if (idx > mMaxIdx)
+    {
+      mMaxIdx = idx;
+      for (int i = 0; i < mMaxIdx; i++)
+        mZEstimations[i] = -mParameters.epsR*mHEstimations[i];
+      mNeedRefillQueue = true;
+    }
+    if (idx == mProblem->GetConstraintsNumber())
+    {
+      mCalculationsCounters[idx]++;
+      mNextPoints[i].idx = idx;
+      mNextPoints[i].g[idx] = mProblem->Calculate(mNextPoints[i].y, idx);
+    }
+    if (mNextPoints[i].idx == mMaxIdx &&
+        mNextPoints[i].g[mMaxIdx] < mZEstimations[mMaxIdx])
+    {
+      mZEstimations[mMaxIdx] = mNextPoints[i].g[mMaxIdx];
+      mNeedRefillQueue = true;
+    }
+  }
+}
+
+void NLPSolver::InsertIntervals()
+{
+  for (size_t i = 0; i < mParameters.numPoints; i++)
+  {
+    Interval* pOldInterval = mNextIntervals[i];
+    Interval* pNewInterval = new Interval(mNextPoints[i], pOldInterval->pr);
+    pOldInterval->pr = mNextPoints[i];
+    pOldInterval->delta = pow(pOldInterval->pr.x - pOldInterval->pl.x,
+                              1. / mProblem->GetDimension());
+    pNewInterval->delta = pow(pNewInterval->pr.x - pNewInterval->pl.x,
+                              1.
+                              / mProblem->GetDimension());
+    mMinDelta = std::min(mMinDelta, pNewInterval->delta);
+    mMinDelta = std::min(mMinDelta, pOldInterval->delta);
+
+    auto insResult = mSearchInformation.insert(pNewInterval);
+    bool wasInserted = insResult.second;
+    if (!wasInserted)
+      throw std::runtime_error("Error during interval insertion.");
+
+    UpdateAllH(insResult.first);
+    UpdateAllH(--insResult.first);
+
+    if (!mNeedRefillQueue)
+    {
+      pNewInterval->R = CalculateR(pNewInterval);
+      mNextIntervals[i]->R = CalculateR(mNextIntervals[i]);
+      mQueue.push(pNewInterval);
+      mQueue.push(pOldInterval);
+    }
+  }
+}
+
+void NLPSolver::CalculateNextPoints()
+{
+  for (size_t i = 0; i < mParameters.numPoints; i++)
+  {
+    mNextIntervals[i] = mQueue.top();
+    mQueue.pop();
+    mNextPoints[i].x = GetNextPointCoordinate(mNextIntervals[i]);
+
+    if (mNextPoints[i].x >= mNextIntervals[i]->pr.x || mNextPoints[i].x <= mNextIntervals[i]->pl.x)
+      mNeedStop = true;
+      //std::cout << "Warning: resolution of evolvent is not enough to continue the search";
+
+    mEvolvent.GetImage(mNextPoints[i].x, mNextPoints[i].y);
+  }
+}
+
+void NLPSolver::RefillQueue()
+{
+  mQueue = PriorityQueue();
+  for (const auto& pInterval : mSearchInformation)
+  {
+    pInterval->R = CalculateR(pInterval);
+    mQueue.push(pInterval);
+  }
+  mNeedRefillQueue = false;
+}
+
+void NLPSolver::EstimateOptimum()
+{
+  for (size_t i = 0; i < mNextPoints.size(); i++)
+  {
+    if (mOptimumEstimation.idx < mNextPoints[i].idx ||
+        (mOptimumEstimation.idx == mNextPoints[i].idx &&
+         mOptimumEstimation.g[mOptimumEstimation.idx] > mNextPoints[i].g[mNextPoints[i].idx]))
+    {
+      mOptimumEstimation = mNextPoints[i];
+      mNeedRefillQueue = true;
+      if (mOptimumEstimation.idx == mProblem->GetConstraintsNumber() &&
+          mOptimumEstimation.g[mOptimumEstimation.idx] < mParameters.stopVal)
+        mNeedStop = true;
+    }
+  }
+}
+
+void NLPSolver::UpdateH(double newValue, int index)
+{
+  if (newValue > mHEstimations[index] || (mHEstimations[index] == 1.0 && newValue > zeroHLevel))
+  {
+    mHEstimations[index] = newValue;
+    mNeedRefillQueue = true;
+  }
+}
+
+void NLPSolver::UpdateAllH(std::set<Interval*, CompareIntervals>::iterator iterator)
+{
+  Interval* pInterval = *iterator;
+  if (pInterval->pl.idx < 0)
+    return;
+
+  if (pInterval->pl.idx == pInterval->pr.idx)
+    UpdateH(fabs(pInterval->pr.g[pInterval->pr.idx] - pInterval->pl.g[pInterval->pl.idx]) /
+            pInterval->delta, pInterval->pl.idx);
+  else
+  {
+    auto rightIterator = iterator;
+    auto leftIterator = iterator;
+    //right lookup
+    ++rightIterator;
+    while (rightIterator != mSearchInformation.end() && (*rightIterator)->pl.idx < pInterval->pl.idx)
+      ++rightIterator;
+    if (rightIterator != mSearchInformation.end() && (*rightIterator)->pl.idx >= pInterval->pl.idx)
+    {
+      int idx = pInterval->pl.idx;
+      UpdateH(fabs((*rightIterator)->pl.g[idx] - pInterval->pl.g[idx]) /
+              pow((*rightIterator)->pl.x - pInterval->pl.x, 1. / mProblem->GetDimension()), idx);
+    }
+
+    //left lookup
+    --leftIterator;
+    while (leftIterator != mSearchInformation.begin() && (*leftIterator)->pl.idx < pInterval->pl.idx)
+      --leftIterator;
+    if (leftIterator != mSearchInformation.begin() && (*leftIterator)->pl.idx >= pInterval->pl.idx)
+    {
+      int idx = pInterval->pl.idx;
+      UpdateH(fabs((*leftIterator)->pl.g[idx] - pInterval->pl.g[idx]) /
+              pow(pInterval->pl.x - (*leftIterator)->pl.x, 1.
+              / mProblem->GetDimension()), idx);
+    }
+  }
+}
+
+double NLPSolver::CalculateR(Interval* i) const
+{
+  if (i->pl.idx == i->pr.idx)
+  {
+    const int v = i->pr.idx;
+    return i->delta + pow((i->pr.g[v] - i->pl.g[v]) / (mParameters.r * mHEstimations[v]), 2) / i->delta -
+           2.*(i->pr.g[v] + i->pl.g[v] - 2*mZEstimations[v]) / (mParameters.r * mHEstimations[v]);
+  }
+  else if (i->pl.idx < i->pr.idx)
+    return 2*i->delta - 4*(i->pr.g[i->pr.idx] - mZEstimations[i->pr.idx]) / (mParameters.r * mHEstimations[i->pr.idx]);
+  else
+    return 2*i->delta - 4*(i->pl.g[i->pl.idx] - mZEstimations[i->pl.idx]) / (mParameters.r * mHEstimations[i->pl.idx]);
+}
+
+double NLPSolver::GetNextPointCoordinate(Interval* i) const
+{
+  double x;
+  if (i->pr.idx == i->pl.idx)
+  {
+    const int v = i->pr.idx;
+    double dg = i->pr.g[v] - i->pl.g[v];
+    x = 0.5 * (i->pr.x + i->pl.x) -
+        0.5*((dg > 0.) ? 1. : -1.) * pow(fabs(dg) / mHEstimations[v], mProblem->GetDimension()) / mParameters.r;
+  }
+  else
+    x = 0.5 * (i->pr.x + i->pl.x);
+
+  return x;
+}
+
+bool solver_utils::checkVectorsDiff(const double* y1, const double* y2, size_t dim, double eps)
+{
+  for (size_t i = 0; i < dim; i++)
+  {
+    if (fabs(y1[i] - y2[i]) > eps)
+      return true;
+  }
+
+  return false;
+}
diff --git a/src/algs/ags/solver.hpp b/src/algs/ags/solver.hpp
new file mode 100644
index 00000000..4c57df46
--- /dev/null
+++ b/src/algs/ags/solver.hpp
@@ -0,0 +1,107 @@
+/*
+Copyright (C) 2018 Sovrasov V. - All Rights Reserved
+ * You may use, distribute and modify this code under the
+ * terms of the MIT license.
+ * You should have received a copy of the MIT license with
+ * this file. If not visit https://opensource.org/licenses/MIT
+*/
+#pragma once
+
+#include "data_types.hpp"
+#include "evolvent.hpp"
+#include "local_optimizer.hpp"
+
+#include <functional>
+#include <limits>
+#include <memory>
+#include <queue>
+#include <set>
+#include <vector>
+
+namespace ags
+{
+
+struct SolverParameters
+{
+  double eps = 0.01; // method tolerance in the Holder metric on the 1d interval. A smaller value gives better search precision and a lower probability of an early stop.
+  double stopVal = std::numeric_limits<double>::lowest(); // the method stops after the objective becomes less than this value
+  double r = 3; // reliability parameter. A higher value of r means slower convergence but a higher chance to catch the global minimum.
+  unsigned numPoints = 1; // number of new points per iteration. > 1 is useless in the current implementation.
+  unsigned itersLimit = 20000; // max number of iterations.
+  unsigned evolventDensity = 12; // density of the evolvent. By default the density is 2^-12 on the hypercube [0,1]^N,
+  // which means that the maximum search accuracy is 2^-12. If the search hypercube is large, the density can be increased accordingly to achieve better accuracy.
+  double epsR = 0.001; // parameter which prevents the method from paying too much attention to constraints. Greater values of this parameter speed up convergence,
+  // but the global minimum can be lost.
+  bool refineSolution = false; // if true, the final solution will be refined with the Hooke-Jeeves method.
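+
+  // Illustrative standalone usage of this struct and the solver below
+  // (a made-up 1-D unconstrained objective; all values are arbitrary):
+  //   ags::NLPSolver solver;
+  //   solver.SetParameters(ags::SolverParameters(0.01, 3, 0.001, 5000));
+  //   solver.SetProblem({[](const double* x) { return x[0]*x[0]; }}, {-1.}, {1.});
+  //   ags::Trial best = solver.Solve();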
+
+  SolverParameters() {}
+  SolverParameters(double _eps, double _r,
+                   double epsR_, unsigned _trialsLimit) :
+    eps(_eps), r(_r), itersLimit(_trialsLimit), epsR(epsR_)
+  {}
+};
+
+class NLPSolver
+{
+protected:
+  using PriorityQueue =
+    std::priority_queue<Interval*, std::vector<Interval*>, CompareByR>;
+
+  HookeJeevesOptimizer mLocalOptimizer;
+
+  SolverParameters mParameters;
+  std::shared_ptr<IGOProblem<double>> mProblem;
+  Evolvent mEvolvent;
+
+  std::vector<double> mHEstimations;
+  std::vector<double> mZEstimations;
+  std::vector<Trial> mNextPoints;
+  PriorityQueue mQueue;
+  std::set<Interval*, CompareIntervals> mSearchInformation;
+  std::vector<Interval*> mNextIntervals;
+  Trial mOptimumEstimation;
+
+  std::vector<unsigned> mCalculationsCounters;
+  unsigned mIterationsCounter;
+  bool mNeedRefillQueue;
+  bool mNeedStop;
+  double mMinDelta;
+  int mMaxIdx;
+
+  void InitLocalOptimizer();
+  void FirstIteration();
+  void MakeTrials();
+  void InsertIntervals();
+  void CalculateNextPoints();
+  void RefillQueue();
+  void EstimateOptimum();
+
+  void InitDataStructures();
+  void ClearDataStructures();
+
+  void UpdateAllH(std::set<Interval*, CompareIntervals>::iterator);
+  void UpdateH(double newValue, int index);
+  double CalculateR(Interval*) const;
+  double GetNextPointCoordinate(Interval*) const;
+
+public:
+  using FuncPtr = std::function<double(const double*)>;
+  NLPSolver();
+
+  void SetParameters(const SolverParameters& params);
+  void SetProblem(std::shared_ptr<IGOProblem<double>> problem);
+  void SetProblem(const std::vector<FuncPtr>& functions,
+                  const std::vector<double>& leftBound, const std::vector<double>& rightBound);
+
+  Trial Solve(std::function<bool(void)> externalStopFunc);
+  Trial Solve();
+  std::vector<unsigned> GetCalculationsStatistics() const;
+  std::vector<double> GetHolderConstantsEstimations() const;
+};
+
+namespace solver_utils
+{
+  bool checkVectorsDiff(const double* y1, const double* y2, size_t dim, double eps);
+}
+
+}
diff --git a/src/algs/ags/tst.cc b/src/algs/ags/tst.cc
new file mode 100644
index 00000000..cbf6e995
--- /dev/null
+++ b/src/algs/ags/tst.cc
@@ -0,0 +1,58 @@
+#define _USE_MATH_DEFINES
+#include <cmath>
+#include <iostream>
+#include <iomanip>
+
+#include "nlopt.hpp"
+
+double f_obj(const std::vector<double>& x, std::vector<double>&, void*)
+{
+  return -1.5*pow(x[0], 2) * exp(1 - pow(x[0], 2)
+         - 20.25*pow(x[0] - x[1], 2)) - pow(0.5 * (x[1] - 1)*(x[0] - 1), 4)
+         * exp(2 - pow(0.5 * (x[0] - 1), 4) - pow(x[1] - 1, 4));
+}
+
+double f_c0(const std::vector<double>& x, std::vector<double>&, void*)
+{
+  return 0.01*(pow(x[0] - 2.2, 2) + pow(x[1] - 1.2, 2) - 2.25);
+}
+double f_c1(const std::vector<double>& x, std::vector<double>&, void*)
+{
+  return 100 * (1 - pow(x[0] - 2, 2) / 1.44 - pow(0.5*x[1], 2));
+}
+double f_c2(const std::vector<double>& x, std::vector<double>&, void*)
+{
+  return 10 * (x[1] - 1.5 - 1.5*sin(2*M_PI*(x[0] - 1.75)));
+}
+
+int ags_verbose = 1;
+double ags_eps = 0.001;
+double eps_res = 0.1;
+
+int main(int argc, char **argv)
+{
+  nlopt::opt opt(nlopt::GN_AGS, 2);
+
+  opt.set_lower_bounds({0, -1});
+  opt.set_upper_bounds({4, 3});
+
+  opt.add_inequality_constraint(f_c0, NULL, 0);
+  opt.add_inequality_constraint(f_c1, NULL, 0);
+  opt.add_inequality_constraint(f_c2, NULL, 0);
+  opt.set_min_objective(f_obj, NULL);
+  opt.set_maxeval(2000);
+
+  double minf;
+  std::vector<double> x(2);
+
+  try {
+    nlopt::result result = opt.optimize(x, minf);
+    std::cout << "found minimum at f(" << x[0] << "," << x[1] << ") = "
+              << std::setprecision(10) << minf << std::endl;
+  }
+  catch (std::exception &e) {
+    std::cout << "nlopt failed: " << e.what() << std::endl;
+  }
+
+  return EXIT_SUCCESS;
+}
diff --git a/src/api/general.c b/src/api/general.c
index 410771b5..6e75074a 100644
--- a/src/api/general.c
+++ b/src/api/general.c
@@ -7,17 +7,17 @@
  * distribute, sublicense, and/or sell copies of the Software,
and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice shall be
  * included in all copies or substantial portions of the Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
 #include "nlopt-internal.h"
@@ -81,7 +81,12 @@ static const char nlopt_algorithm_names[NLOPT_NUM_ALGORITHMS][256] = {
     "Multi-level single-linkage (MLSL), quasi-random (global, needs sub-algorithm)",
     "Sequential Quadratic Programming (SQP) (local, derivative)",
     "CCSA (Conservative Convex Separable Approximations) with simple quadratic approximations (local, derivative)",
-    "ESCH evolutionary strategy"
+    "ESCH evolutionary strategy",
+#ifdef NLOPT_CXX11
+    "AGS (global, no-derivative)"
+#else
+    "AGS (NOT COMPILED)"
+#endif
 };
 
 const char * NLOPT_STDCALL nlopt_algorithm_name(nlopt_algorithm a)
diff --git a/src/api/nlopt.h b/src/api/nlopt.h
index 328b6d09..998db3cd 100644
--- a/src/api/nlopt.h
+++ b/src/api/nlopt.h
@@ -7,17 +7,17 @@
  * distribute, sublicense, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice shall be
  * included in all copies or substantial portions of the Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
 #ifndef NLOPT_H
@@ -67,7 +67,7 @@ typedef void (*nlopt_mfunc)(unsigned m, double *result, double *gradient,
                             /* NULL if not needed */
                             void *func_data);
 
-/* A preconditioner, which preconditions v at x to return vpre. 
+/* A preconditioner, which preconditions v at x to return vpre.
    (The meaning of "preconditioning" is algorithm-dependent.) */
 typedef void (*nlopt_precond)(unsigned n, const double *x, const double *v,
                               double *vpre, void *data);
@@ -75,10 +75,10 @@ typedef void (*nlopt_precond)(unsigned n, const double *x, const double *v,
 typedef enum {
     /* Naming conventions:
 
-       NLOPT_{G/L}{D/N}_* 
-       = global/local derivative/no-derivative optimization, 
-       respectively 
+       NLOPT_{G/L}{D/N}_*
+       = global/local derivative/no-derivative optimization,
+       respectively
 
        *_RAND algorithms involve some randomization.
       *_NOSCAL algorithms are *not* scaled to a unit hypercube
@@ -151,6 +151,8 @@ typedef enum {
 
     NLOPT_GN_ESCH,
 
+    NLOPT_GN_AGS,
+
     NLOPT_NUM_ALGORITHMS /* not an algorithm, just the number of them */
 } nlopt_algorithm;
 
@@ -198,9 +200,9 @@ NLOPT_EXTERN(nlopt_opt) nlopt_copy(const nlopt_opt opt);
 
 NLOPT_EXTERN(nlopt_result) nlopt_optimize(nlopt_opt opt, double *x, double *opt_f);
 
-NLOPT_EXTERN(nlopt_result) nlopt_set_min_objective(nlopt_opt opt, nlopt_func f, 
+NLOPT_EXTERN(nlopt_result) nlopt_set_min_objective(nlopt_opt opt, nlopt_func f,
                                                    void *f_data);
-NLOPT_EXTERN(nlopt_result) nlopt_set_max_objective(nlopt_opt opt, nlopt_func f, 
+NLOPT_EXTERN(nlopt_result) nlopt_set_max_objective(nlopt_opt opt, nlopt_func f,
                                                    void *f_data);
 
 NLOPT_EXTERN(nlopt_result) nlopt_set_precond_min_objective(nlopt_opt opt, nlopt_func f, nlopt_precond pre, void *f_data);
@@ -213,12 +215,12 @@ NLOPT_EXTERN(const char*) nlopt_get_errmsg(nlopt_opt opt);
 
 /* constraints: */
 
-NLOPT_EXTERN(nlopt_result) nlopt_set_lower_bounds(nlopt_opt opt, 
+NLOPT_EXTERN(nlopt_result) nlopt_set_lower_bounds(nlopt_opt opt,
                                                   const double *lb);
 NLOPT_EXTERN(nlopt_result) nlopt_set_lower_bounds1(nlopt_opt opt, double lb);
-NLOPT_EXTERN(nlopt_result) nlopt_get_lower_bounds(const nlopt_opt opt, 
+NLOPT_EXTERN(nlopt_result) nlopt_get_lower_bounds(const nlopt_opt opt,
                                                   double *lb);
-NLOPT_EXTERN(nlopt_result) nlopt_set_upper_bounds(nlopt_opt opt, 
+NLOPT_EXTERN(nlopt_result) nlopt_set_upper_bounds(nlopt_opt opt,
                                                   const double *ub);
 NLOPT_EXTERN(nlopt_result) nlopt_set_upper_bounds1(nlopt_opt opt, double ub);
 NLOPT_EXTERN(nlopt_result) nlopt_get_upper_bounds(const nlopt_opt opt,
@@ -283,7 +285,7 @@ NLOPT_EXTERN(int) nlopt_get_force_stop(const nlopt_opt opt);
 
 /* more algorithm-specific parameters */
 
-NLOPT_EXTERN(nlopt_result) nlopt_set_local_optimizer(nlopt_opt opt, 
+NLOPT_EXTERN(nlopt_result) nlopt_set_local_optimizer(nlopt_opt opt,
                                                      const nlopt_opt local_opt);
 
 NLOPT_EXTERN(nlopt_result) nlopt_set_population(nlopt_opt opt, unsigned pop);
@@ -292,12 +294,12 @@ NLOPT_EXTERN(unsigned) nlopt_get_population(const nlopt_opt opt);
 NLOPT_EXTERN(nlopt_result) nlopt_set_vector_storage(nlopt_opt opt, unsigned dim);
 NLOPT_EXTERN(unsigned) nlopt_get_vector_storage(const nlopt_opt opt);
 
-NLOPT_EXTERN(nlopt_result) nlopt_set_default_initial_step(nlopt_opt opt, 
+NLOPT_EXTERN(nlopt_result) nlopt_set_default_initial_step(nlopt_opt opt,
                                                           const double *x);
-NLOPT_EXTERN(nlopt_result) nlopt_set_initial_step(nlopt_opt opt, 
+NLOPT_EXTERN(nlopt_result) nlopt_set_initial_step(nlopt_opt opt,
                                                   const double *dx);
 NLOPT_EXTERN(nlopt_result) nlopt_set_initial_step1(nlopt_opt opt, double dx);
-NLOPT_EXTERN(nlopt_result) nlopt_get_initial_step(const nlopt_opt opt, 
+NLOPT_EXTERN(nlopt_result) nlopt_get_initial_step(const nlopt_opt opt,
                                                   const double *x, double *dx);
 
 /* the following are functions mainly designed to be used internally
@@ -323,7 +325,7 @@ NLOPT_EXTERN(void) nlopt_munge_data(nlopt_opt opt,
 #if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__==3 && __GNUC_MINOR__ > 0))
 #  define NLOPT_DEPRECATED __attribute__((deprecated))
 #else
-#  define NLOPT_DEPRECATED 
+#  define NLOPT_DEPRECATED
 #endif
 
 typedef double (*nlopt_func_old)(int n, const double *x,
diff --git a/src/api/optimize.c b/src/api/optimize.c
index 9592653b..71614d21 100644
--- a/src/api/optimize.c
+++ b/src/api/optimize.c
@@ -34,6 +34,9 @@
 #ifdef NLOPT_CXX
 #  include "stogo.h"
 #endif
+#ifdef NLOPT_CXX11
+#  include "ags.h"
+#endif
 
 #include "cdirect.h"
 
@@ -347,6 +350,7 @@ static int elimdim_wrapcheck(nlopt_opt opt)
     case
NLOPT_LN_SBPLX:
     case NLOPT_GN_ISRES:
     case NLOPT_GN_ESCH:
+    case NLOPT_GN_AGS:
     case NLOPT_GD_STOGO:
     case NLOPT_GD_STOGO_RAND:
         return 1;
 
@@ -504,6 +508,17 @@ static nlopt_result nlopt_optimize_(nlopt_opt opt, double *x, double *minf)
             break;
         }
 
+    case NLOPT_GN_AGS:
+#ifdef NLOPT_CXX11
+        if (!finite_domain(n, lb, ub))
+            RETURN_ERR(NLOPT_INVALID_ARGS, opt,
+                       "finite domain required for global algorithm");
+        return ags_minimize(ni, f, f_data, opt->m, opt->fc, x, minf, lb, ub, &stop);
+        break;
+#else
+        return NLOPT_INVALID_ARGS;
+#endif
+
     case NLOPT_GD_STOGO:
     case NLOPT_GD_STOGO_RAND:
 #ifdef NLOPT_CXX
diff --git a/src/api/options.c b/src/api/options.c
index 04e0bfe7..c60729b6 100644
--- a/src/api/options.c
+++ b/src/api/options.c
@@ -437,7 +437,8 @@ static int inequality_ok(nlopt_algorithm algorithm) {
             || AUGLAG_ALG(algorithm)
             || algorithm == NLOPT_GN_ISRES
             || algorithm == NLOPT_GN_ORIG_DIRECT
-            || algorithm == NLOPT_GN_ORIG_DIRECT_L);
+            || algorithm == NLOPT_GN_ORIG_DIRECT_L
+            || algorithm == NLOPT_GN_AGS);
 }
 
 nlopt_result
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index f1fff664..8871312d 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -7,7 +7,7 @@ target_link_libraries (testopt ${nlopt_lib})
 target_include_directories (testopt PRIVATE ${NLOPT_PRIVATE_INCLUDE_DIRS})
 add_dependencies (tests testopt)
 
-foreach (algo_index RANGE 29)# 42
+foreach (algo_index RANGE 29)# 43
   foreach (obj_index RANGE 1)# 21
     set (enable_ TRUE)
     # cxx stogo
@@ -16,6 +16,10 @@ foreach (algo_index RANGE 29)# 42
       set (enable_ FALSE)
     endif ()
   endif ()
+  # cxx11 ags
+  if (NOT NLOPT_CXX11 AND algo_index STREQUAL 43)
+    set (enable_ FALSE)
+  endif ()
   # L-BFGS
   if (algo_index STREQUAL 10)
     set (enable_ FALSE)
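
For reference, once the library is built with NLOPT_CXX11 defined, the new algorithm is reachable from the plain C API as well as the C++ wrapper exercised by tst.cc above. A minimal sketch (the quadratic objective and the bounds are invented for illustration; error handling is omitted):

    #include <cmath>
    #include <cstdio>
    #include <nlopt.h>

    /* toy objective: f(x) = (x0 - 1)^2 + (x1 + 0.5)^2; AGS never uses the gradient */
    static double sphere_obj(unsigned n, const double *x, double *grad, void *data)
    {
        (void)n; (void)grad; (void)data;
        return std::pow(x[0] - 1, 2) + std::pow(x[1] + 0.5, 2);
    }

    int main()
    {
        double lb[2] = {-2, -2}, ub[2] = {2, 2};
        double x[2] = {0, 0}, minf;
        nlopt_opt opt = nlopt_create(NLOPT_GN_AGS, 2); /* only available in an NLOPT_CXX11 build */
        nlopt_set_lower_bounds(opt, lb);   /* AGS requires finite bounds on all parameters */
        nlopt_set_upper_bounds(opt, ub);
        nlopt_set_min_objective(opt, sphere_obj, NULL);
        nlopt_set_maxeval(opt, 2000);
        if (nlopt_optimize(opt, x, &minf) >= 0)
            std::printf("minimum %g at (%g, %g)\n", minf, x[0], x[1]);
        nlopt_destroy(opt);
        return 0;
    }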