optimize: memoize wrapping
jschueller committed Nov 10, 2024
1 parent 612f9f6 commit 6a868a8
Showing 4 changed files with 163 additions and 0 deletions.
2 changes: 2 additions & 0 deletions NEWS.md
@@ -6,6 +6,8 @@

* Dropped unused LD_LBFGS_NOCEDAL enum value.

* Fixed COBYLA not returning the optimum ([#57])

* Various minor bugfixes ([#570], [#368], [#563], [#504])

## NLopt 2.8
87 changes: 87 additions & 0 deletions src/api/optimize.c
@@ -394,6 +394,69 @@ static int elimdim_wrapcheck(nlopt_opt opt)
    }
}

/*********************************************************************/
/* wrapper functions for algorithms that do not consider every evaluated point as a
 * candidate, or that return only the last evaluation, which might not be the optimum */

typedef struct {
    nlopt_func f;
    void *f_data;
    const double *lb, *ub; /* bounds, of length n */
    double minf;           /* best feasible objective value seen so far */
    double *bestx;         /* corresponding point, of length n */
} memoize_data;


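/* objective wrapper: forwards the call to the user's function and records the
 * best in-bounds value and point seen so far in d->minf / d->bestx */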
static double memoize_func(unsigned n, const double *x, double *grad, void *d_)
{
    memoize_data *d = (memoize_data *) d_;
    const double *lb = d->lb, *ub = d->ub;
    double val;
    unsigned i, feasible = 1;

    val = d->f(n, x, grad, d->f_data);

    for (i = 0; i < n; ++i)
    {
        if (lb && (x[i] < lb[i]))
            feasible = 0;
        if (ub && (x[i] > ub[i]))
            feasible = 0;
    }
    if (feasible && (val < d->minf))
    {
        d->minf = val;
        for (i = 0; i < n; ++i)
            d->bestx[i] = x[i];
    }
    return val;
}


/* return whether to use memoize wrapping. */
static int memoize_wrapcheck(nlopt_opt opt)
{
    if (!opt)
        return 0;

    /* constraints not supported */
    if ((opt->m > 0) || (opt->p > 0))
        return 0;

    switch (opt->algorithm) {
    case NLOPT_LN_COBYLA:
    case NLOPT_LD_TNEWTON:
    case NLOPT_LD_TNEWTON_RESTART:
    case NLOPT_LD_TNEWTON_PRECOND:
    case NLOPT_LD_TNEWTON_PRECOND_RESTART:
        return 1;

    default:
        return 0;
    }
}


/*********************************************************************/

#define POP(defaultpop) (opt->stochastic_population > 0 ? (int)opt->stochastic_population : (nlopt_stochastic_population > 0 ? nlopt_stochastic_population : (defaultpop)))
@@ -863,6 +926,7 @@ nlopt_result NLOPT_STDCALL nlopt_optimize(nlopt_opt opt, double *x, double *opt_f)
    void *f_data;
    nlopt_precond pre;
    f_max_data fmd;
    memoize_data mmzd;
    int maximize;
    nlopt_result ret;

@@ -891,6 +955,18 @@ nlopt_result NLOPT_STDCALL nlopt_optimize(nlopt_opt opt, double *x, double *opt_f)
        opt->maximize = 0;
    }

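    /* wrap the objective so that the best feasible point evaluated is remembered,
       for algorithms that may otherwise not return it (see memoize_wrapcheck) */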
    if (memoize_wrapcheck(opt))
    {
        mmzd.f = opt->f;
        mmzd.f_data = opt->f_data;
        mmzd.lb = opt->lb;
        mmzd.ub = opt->ub;
        mmzd.minf = DBL_MAX;
        mmzd.bestx = (double *) calloc(opt->n, sizeof(double));
        opt->f = memoize_func;
        opt->f_data = &mmzd;
    }

    { /* possibly eliminate lb == ub dimensions for some algorithms */
        nlopt_opt elim_opt = opt;
        if (elimdim_wrapcheck(opt)) {
@@ -916,6 +992,17 @@ nlopt_result NLOPT_STDCALL nlopt_optimize(nlopt_opt opt, double *x, double *opt_f)
    }

done:

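    /* if the objective was memoize-wrapped above, restore it and report the best
       feasible point recorded during the run */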
    if (memoize_wrapcheck(opt))
    {
        for (unsigned i = 0; i < opt->n; ++i)
            x[i] = mmzd.bestx[i];
        free(mmzd.bestx);
        *opt_f = mmzd.minf;
        opt->f = mmzd.f;
        opt->f_data = mmzd.f_data;
    }

    if (maximize) { /* restore original signs */
        opt->maximize = maximize;
        opt->stopval = -opt->stopval;
2 changes: 2 additions & 0 deletions test/CMakeLists.txt
@@ -71,6 +71,8 @@ if (Python_FOUND AND NUMPY_FOUND AND (SWIG_FOUND OR (EXISTS ${PROJECT_SOURCE_DIR
    set_tests_properties (test_python${algo_index} PROPERTIES ENVIRONMENT "${PYINSTALLCHECK_ENVIRONMENT}")
  endforeach()

  add_test (NAME test_memoize COMMAND ${Python_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/t_memoize.py ${algo_index})
  set_tests_properties (test_memoize PROPERTIES ENVIRONMENT "${PYINSTALLCHECK_ENVIRONMENT}")
endif ()

if (OCTAVE_FOUND)
72 changes: 72 additions & 0 deletions test/t_memoize.py
@@ -0,0 +1,72 @@
import nlopt
import math as m
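
# Regression test for the memoize wrapping in src/api/optimize.c: track the best
# objective value actually evaluated and check that each algorithm reports an
# optimum at least as good as that value.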

numFuncEval = [0]
minf_ext = [1e300]

lb = [-10., -10.]
ub = [+10., +10.]


def myfunc(x, grad):
    for j in range(2):
        if m.isnan(x[j]) or x[j] < lb[j] or x[j] > ub[j]:
            return 1.e10
    numFuncEval[0] += 1

    x1 = x[0]
    x2 = x[1]

    # http://al-roomi.org/benchmarks/unconstrained/2-dimensions/65-powell-s-badly-scaled-function
    # Powell's Badly Scaled Function
    # Range of initial points: -10 < xj < 10 , j=1,2
    # Global minima: (x1,x2)=(1.098...e-5, 9.106...)
    # f(x1,x2)=0
    f1 = (10000. * x1 * x2 - 1.)**2
    f2 = (m.exp(-x1) + m.exp(-x2) - 1.0001)**2
    retval = f1 + f2

    if grad.size > 0:
        # raise ValueError('Cannot supply gradient values')
        grad[0] = 2.0 * (10000. * x1 * x2 - 1.) * 10000. * x2 + 2.0 * (m.exp(-x1) + m.exp(-x2) - 1.0001) * (-m.exp(-x1))
        grad[1] = 2.0 * (10000. * x1 * x2 - 1.) * 10000. * x1 + 2.0 * (m.exp(-x1) + m.exp(-x2) - 1.0001) * (-m.exp(-x2))

    # print("myfunc: x:", x, ", val:", retval)

    if retval < minf_ext[0]:
        minf_ext[0] = retval

    return retval


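# run every algorithm on Powell's badly scaled function (MMA serves as the local
# optimizer for the algorithms that need one) and compare the reported optimum
# with the best value actually evaluated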
for algo in range(nlopt.NUM_ALGORITHMS):
    if algo in [nlopt.LD_LBFGS, nlopt.LD_VAR1, nlopt.LD_VAR2]:
        continue
    opt = nlopt.opt(algo, 2)

    print('-' * 40)
    print("Algo:", opt.get_algorithm_name(), algo)
    numFuncEval[0] = 0
    minf_ext = [1e300]

    opt.set_min_objective(myfunc)
    opt.set_lower_bounds(lb)
    opt.set_upper_bounds(ub)
    opt.set_maxeval(int(1e4))
    opt.set_xtol_rel(1e-4)
    local_opt = nlopt.opt(nlopt.LD_MMA, 2)
    opt.set_local_optimizer(local_opt)
    x0 = [0.0, 0.0]
    print("x0:", x0)
    try:
        x = opt.optimize(x0)
        minf = opt.last_optimum_value()
        print("optimum at ", x[0], x[1])
        print("minimum value = ", minf)
        print("result code = ", opt.last_optimize_result())
        print("num function evaluations:", numFuncEval[0])
        if minf_ext[0] < minf:
            raise ValueError(f"returned minimum {minf} is worse than the best evaluated value {minf_ext[0]}")
    except nlopt.invalid_argument:
        # stogo/ags might not be enabled
        pass
