From 35b5b8446fdfdffb426f463d18824b18d7bc03e3 Mon Sep 17 00:00:00 2001 From: Marc Becker <33069354+be-marc@users.noreply.github.com> Date: Sat, 27 Apr 2024 09:58:25 +0200 Subject: [PATCH] feat: add asynchronous parallelization (#210) * feat: add rush parallelization * refactor: worker_loop * chore: remove assertion * feat: freeze archive when rush is not available anymore * refactor: start workers * fix: termination in random search * chore: print xdt * refactor: send all hpcs at once in random search * docs: add params and fields * feat: store start and stop timestamps * test: OptimInstanceSingleCrit * docs: functions * chore: remotes * chore: pkgdown * test: remove rush * refactor: start_workers flag * feat: option to start workers when instance is created * feat: capture log on worker * refactor: extra in fail * feat: save log messages from worker * refactor: remove capture.output * fix: terminate * test: workflow * refactor: archive cache * fix: partial argument match * refactor: move parameter * refactor: OptimInstanceRush * ci: add redis * chore: remove browser * chore: pkgdown * draft * draft * fix: nas in trafo_xs * chore: leanify * chore: import rush * feat: optimize_decentralized * fix: add callbacks to optimize_decentralized * feat: add debug mode * fix: debug mode * feat: allow optimizer to control number of workers * fix: transformation functions in random search * feat: as_terminator * fix: assert_terminators * refactor: async classes * refactor: ArchiveAsync * chore: rename file * refactor: archive best * refactor: async * chore: internal * tests: rush * chore: pkgdown * refactor: async * refactor: async * refactor: batch * draft * draft * draft * draft * fix: debug * draft * draft * draft * fix: pkgdown * draft * draft * fix: remove deep clone from archivasync * tests: activate * tests: activate * tests: activate * tests: activate * chore: comment * chore: news --- .github/workflows/dev-cmd-check.yml | 4 + .github/workflows/r-cmd-check.yml | 4 + DESCRIPTION | 49 ++- NAMESPACE | 67 +++- NEWS.md | 11 +- R/Archive.R | 169 +--------- R/ArchiveAsync.R | 248 ++++++++++++++ R/ArchiveBatch.R | 181 ++++++++++ R/ArchiveBest.R | 105 ------ R/CallbackAsync.R | 123 +++++++ R/{CallbackOptimization.R => CallbackBatch.R} | 49 ++- R/ContextAsync.R | 45 +++ R/{ContextOptimization.R => ContextBatch.R} | 16 +- R/Objective.R | 29 +- R/OptimInstance.R | 201 +++-------- R/OptimInstanceAsync.R | 90 +++++ R/OptimInstanceAsyncMultiCrit.R | 79 +++++ R/OptimInstanceAsyncSingleCrit.R | 80 +++++ R/OptimInstanceBatch.R | 203 ++++++++++++ R/OptimInstanceBatchMultiCrit.R | 71 ++++ R/OptimInstanceBatchSingleCrit.R | 61 ++++ R/OptimInstanceMultiCrit.R | 70 ---- R/OptimInstanceSingleCrit.R | 56 ---- R/Optimizer.R | 61 ++-- R/OptimizerAsync.R | 141 ++++++++ R/OptimizerAsyncDesignPoints.R | 66 ++++ R/OptimizerAsyncGridSearch.R | 75 +++++ R/OptimizerAsyncRandomSearch.R | 63 ++++ R/OptimizerBatch.R | 72 ++++ R/{OptimizerCmaes.R => OptimizerBatchCmaes.R} | 10 +- ...nPoints.R => OptimizerBatchDesignPoints.R} | 8 +- ...usSearch.R => OptimizerBatchFocusSearch.R} | 8 +- R/{OptimizerGenSA.R => OptimizerBatchGenSA.R} | 8 +- ...ridSearch.R => OptimizerBatchGridSearch.R} | 6 +- R/{OptimizerIrace.R => OptimizerBatchIrace.R} | 18 +- ...timizerNLoptr.R => OptimizerBatchNLoptr.R} | 8 +- ...mSearch.R => OptimizerBatchRandomSearch.R} | 8 +- R/Terminator.R | 2 +- R/TerminatorClockTime.R | 2 +- R/TerminatorCombo.R | 2 +- R/TerminatorEvals.R | 2 +- R/TerminatorNone.R | 4 +- R/TerminatorPerfReached.R | 2 +- R/TerminatorRunTime.R | 
2 +- R/TerminatorStagnation.R | 2 +- R/as_terminator.R | 40 +++ R/assertions.R | 104 +++++- R/bb_optimize.R | 4 +- R/helper.R | 81 ++--- R/mlr_callbacks.R | 6 +- R/sugar.R | 65 ++++ R/worker_loops.R | 28 ++ R/zzz.R | 2 +- README.Rmd | 2 +- README.md | 4 +- inst/WORDLIST | 8 +- man-roxygen/example.R | 2 +- man-roxygen/field_archive.R | 2 + man-roxygen/field_callbacks.R | 2 + man-roxygen/field_codomain.R | 3 + man-roxygen/field_context.R | 2 + man-roxygen/field_objective.R | 2 + man-roxygen/field_progressor.R | 2 + man-roxygen/field_restart_lost_workers.R | 2 + man-roxygen/field_rush.R | 2 + man-roxygen/field_search_space.R | 2 + man-roxygen/field_start_time.R | 4 + man-roxygen/field_terminator.R | 2 + man-roxygen/param_await_workers.R | 2 + man-roxygen/param_check_values.R | 3 +- man-roxygen/param_detect_lost_tasks.R | 3 + man-roxygen/param_freeze_archive.R | 4 + man-roxygen/param_heartbeat_expire.R | 2 + man-roxygen/param_heartbeat_period.R | 2 + man-roxygen/param_host.R | 2 + man-roxygen/param_lgr_thresholds.R | 3 + man-roxygen/param_n_workers.R | 3 + man-roxygen/param_null_ok.R | 2 + man-roxygen/param_objective.R | 2 + man-roxygen/param_packages.R | 2 + man-roxygen/param_rush.R | 2 + man-roxygen/param_start_workers.R | 2 + man-roxygen/param_terminator.R | 2 + man/Archive.Rd | 129 +------- man/ArchiveAsync.Rd | 312 ++++++++++++++++++ man/ArchiveBatch.Rd | 211 ++++++++++++ man/ArchiveBest.Rd | 143 -------- man/CallbackAsync.Rd | 75 +++++ ...llbackOptimization.Rd => CallbackBatch.Rd} | 26 +- man/ContextAsync.Rd | 85 +++++ ...ContextOptimization.Rd => ContextBatch.Rd} | 34 +- man/Objective.Rd | 21 +- man/ObjectiveRFun.Rd | 3 +- man/ObjectiveRFunDt.Rd | 3 +- man/ObjectiveRFunMany.Rd | 3 +- man/OptimInstance.Rd | 115 ++----- man/OptimInstanceAsync.Rd | 114 +++++++ man/OptimInstanceAsyncMultiCrit.Rd | 130 ++++++++ man/OptimInstanceAsyncSingleCrit.Rd | 130 ++++++++ man/OptimInstanceBatch.Rd | 170 ++++++++++ ...Crit.Rd => OptimInstanceBatchMultiCrit.Rd} | 75 ++--- ...rit.Rd => OptimInstanceBatchSingleCrit.Rd} | 70 ++-- man/Optimizer.Rd | 28 +- man/OptimizerAsync.Rd | 78 +++++ man/OptimizerBatch.Rd | 77 +++++ man/as_terminator.Rd | 33 ++ man/assign_result_default.Rd | 4 +- man/bb_optimize.Rd | 2 +- man/bbotk.backup.Rd | 2 +- man/bbotk_assertions.Rd | 40 ++- man/bbotk_worker_loop.Rd | 22 ++ man/callback_async.Rd | 77 +++++ ...back_optimization.Rd => callback_batch.Rd} | 18 +- man/evaluate_queue_default.Rd | 15 + man/mlr_optimizers_async_design_points.Rd | 96 ++++++ man/mlr_optimizers_async_grid_search.Rd | 103 ++++++ man/mlr_optimizers_async_random_search.Rd | 73 ++++ man/mlr_optimizers_cmaes.Rd | 30 +- man/mlr_optimizers_design_points.Rd | 30 +- man/mlr_optimizers_focus_search.Rd | 30 +- man/mlr_optimizers_gensa.Rd | 30 +- man/mlr_optimizers_grid_search.Rd | 30 +- man/mlr_optimizers_irace.Rd | 36 +- man/mlr_optimizers_nloptr.Rd | 30 +- man/mlr_optimizers_random_search.Rd | 30 +- man/mlr_terminators_none.Rd | 2 +- man/oi.Rd | 41 +++ man/oi_async.Rd | 41 +++ man/optimize_async_default.Rd | 17 + man/optimize_batch_default.Rd | 20 ++ man/optimize_default.Rd | 23 -- man/shrink_ps.Rd | 2 +- man/terminated_error.Rd | 15 + man/trafo_xs.Rd | 18 + pkgdown/_pkgdown.yml | 14 +- tests/testthat/helper.R | 42 ++- tests/testthat/helper_rs.R | 16 - tests/testthat/setup.R | 10 +- tests/testthat/teardown.R | 3 +- tests/testthat/test_ArchiveAsync.R | 83 +++++ .../{test_Archive.R => test_ArchiveBatch.R} | 32 +- tests/testthat/test_ArchiveBest.R | 69 ---- tests/testthat/test_Callback.R | 29 +- 
.../test_OptimInstanceAsyncSingleCrit.R | 61 ++++ ...t.R => test_OptimInstanceBatchMultiCrit.R} | 18 +- ....R => test_OptimInstanceBatchSingleCrit.R} | 43 ++- .../testthat/test_OptimizerAsynDesignPoints.R | 22 ++ tests/testthat/test_OptimizerAsynGridSearch.R | 21 ++ tests/testthat/test_OptimizerAsync.R | 69 ++++ .../test_OptimizerAsyncRandomSearch.R | 21 ++ tests/testthat/test_OptimizerCmaes.R | 8 +- tests/testthat/test_OptimizerDesignPoints.R | 10 +- tests/testthat/test_OptimizerFocusSearch.R | 10 +- tests/testthat/test_OptimizerGenSA.R | 6 +- tests/testthat/test_OptimizerGridSearch.R | 10 +- tests/testthat/test_OptimizerIrace.R | 22 +- tests/testthat/test_OptimizerNLoptr.R | 6 +- tests/testthat/test_OptimizerRandomSearch.R | 14 +- tests/testthat/test_TerminatorClockTime.R | 2 +- tests/testthat/test_TerminatorEvals.R | 2 +- tests/testthat/test_TerminatorNone.R | 2 +- tests/testthat/test_TerminatorPerfReached.R | 4 +- tests/testthat/test_TerminatorRunTime.R | 2 +- tests/testthat/test_TerminatorStagnation.R | 4 +- .../testthat/test_TerminatorStagnationBatch.R | 4 +- tests/testthat/test_bb_optimize.R | 16 +- tests/testthat/test_mlr_callbacks.R | 41 ++- tests/testthat/test_mlr_optimizers.R | 4 +- tests/testthat/test_nds_selection.R | 2 +- vignettes/bbotk.Rmd | 4 +- 170 files changed, 5013 insertions(+), 1643 deletions(-) create mode 100644 R/ArchiveAsync.R create mode 100644 R/ArchiveBatch.R delete mode 100644 R/ArchiveBest.R create mode 100644 R/CallbackAsync.R rename R/{CallbackOptimization.R => CallbackBatch.R} (76%) create mode 100644 R/ContextAsync.R rename R/{ContextOptimization.R => ContextBatch.R} (67%) create mode 100644 R/OptimInstanceAsync.R create mode 100644 R/OptimInstanceAsyncMultiCrit.R create mode 100644 R/OptimInstanceAsyncSingleCrit.R create mode 100644 R/OptimInstanceBatch.R create mode 100644 R/OptimInstanceBatchMultiCrit.R create mode 100644 R/OptimInstanceBatchSingleCrit.R delete mode 100644 R/OptimInstanceMultiCrit.R delete mode 100644 R/OptimInstanceSingleCrit.R create mode 100644 R/OptimizerAsync.R create mode 100644 R/OptimizerAsyncDesignPoints.R create mode 100644 R/OptimizerAsyncGridSearch.R create mode 100644 R/OptimizerAsyncRandomSearch.R create mode 100644 R/OptimizerBatch.R rename R/{OptimizerCmaes.R => OptimizerBatchCmaes.R} (92%) rename R/{OptimizerDesignPoints.R => OptimizerBatchDesignPoints.R} (91%) rename R/{OptimizerFocusSearch.R => OptimizerBatchFocusSearch.R} (97%) rename R/{OptimizerGenSA.R => OptimizerBatchGenSA.R} (93%) rename R/{OptimizerGridSearch.R => OptimizerBatchGridSearch.R} (90%) rename R/{OptimizerIrace.R => OptimizerBatchIrace.R} (96%) rename R/{OptimizerNLoptr.R => OptimizerBatchNLoptr.R} (94%) rename R/{OptimizerRandomSearch.R => OptimizerBatchRandomSearch.R} (87%) create mode 100644 R/as_terminator.R create mode 100644 R/worker_loops.R create mode 100644 man-roxygen/field_archive.R create mode 100644 man-roxygen/field_callbacks.R create mode 100644 man-roxygen/field_codomain.R create mode 100644 man-roxygen/field_context.R create mode 100644 man-roxygen/field_objective.R create mode 100644 man-roxygen/field_progressor.R create mode 100644 man-roxygen/field_restart_lost_workers.R create mode 100644 man-roxygen/field_rush.R create mode 100644 man-roxygen/field_search_space.R create mode 100644 man-roxygen/field_start_time.R create mode 100644 man-roxygen/field_terminator.R create mode 100644 man-roxygen/param_await_workers.R create mode 100644 man-roxygen/param_detect_lost_tasks.R create mode 100644 man-roxygen/param_freeze_archive.R 
create mode 100644 man-roxygen/param_heartbeat_expire.R create mode 100644 man-roxygen/param_heartbeat_period.R create mode 100644 man-roxygen/param_host.R create mode 100644 man-roxygen/param_lgr_thresholds.R create mode 100644 man-roxygen/param_n_workers.R create mode 100644 man-roxygen/param_null_ok.R create mode 100644 man-roxygen/param_objective.R create mode 100644 man-roxygen/param_packages.R create mode 100644 man-roxygen/param_rush.R create mode 100644 man-roxygen/param_start_workers.R create mode 100644 man-roxygen/param_terminator.R create mode 100644 man/ArchiveAsync.Rd create mode 100644 man/ArchiveBatch.Rd delete mode 100644 man/ArchiveBest.Rd create mode 100644 man/CallbackAsync.Rd rename man/{CallbackOptimization.Rd => CallbackBatch.Rd} (79%) create mode 100644 man/ContextAsync.Rd rename man/{ContextOptimization.Rd => ContextBatch.Rd} (62%) create mode 100644 man/OptimInstanceAsync.Rd create mode 100644 man/OptimInstanceAsyncMultiCrit.Rd create mode 100644 man/OptimInstanceAsyncSingleCrit.Rd create mode 100644 man/OptimInstanceBatch.Rd rename man/{OptimInstanceMultiCrit.Rd => OptimInstanceBatchMultiCrit.Rd} (56%) rename man/{OptimInstanceSingleCrit.Rd => OptimInstanceBatchSingleCrit.Rd} (54%) create mode 100644 man/OptimizerAsync.Rd create mode 100644 man/OptimizerBatch.Rd create mode 100644 man/as_terminator.Rd create mode 100644 man/bbotk_worker_loop.Rd create mode 100644 man/callback_async.Rd rename man/{callback_optimization.Rd => callback_batch.Rd} (85%) create mode 100644 man/evaluate_queue_default.Rd create mode 100644 man/mlr_optimizers_async_design_points.Rd create mode 100644 man/mlr_optimizers_async_grid_search.Rd create mode 100644 man/mlr_optimizers_async_random_search.Rd create mode 100644 man/oi.Rd create mode 100644 man/oi_async.Rd create mode 100644 man/optimize_async_default.Rd create mode 100644 man/optimize_batch_default.Rd delete mode 100644 man/optimize_default.Rd create mode 100644 man/terminated_error.Rd create mode 100644 man/trafo_xs.Rd delete mode 100644 tests/testthat/helper_rs.R create mode 100644 tests/testthat/test_ArchiveAsync.R rename tests/testthat/{test_Archive.R => test_ArchiveBatch.R} (88%) delete mode 100644 tests/testthat/test_ArchiveBest.R create mode 100644 tests/testthat/test_OptimInstanceAsyncSingleCrit.R rename tests/testthat/{test_OptimInstanceMultiCrit.R => test_OptimInstanceBatchMultiCrit.R} (77%) rename tests/testthat/{test_OptimInstanceSingleCrit.R => test_OptimInstanceBatchSingleCrit.R} (83%) create mode 100644 tests/testthat/test_OptimizerAsynDesignPoints.R create mode 100644 tests/testthat/test_OptimizerAsynGridSearch.R create mode 100644 tests/testthat/test_OptimizerAsync.R create mode 100644 tests/testthat/test_OptimizerAsyncRandomSearch.R diff --git a/.github/workflows/dev-cmd-check.yml b/.github/workflows/dev-cmd-check.yml index 906411830..ac47245e8 100644 --- a/.github/workflows/dev-cmd-check.yml +++ b/.github/workflows/dev-cmd-check.yml @@ -33,6 +33,10 @@ jobs: with: r-version: ${{ matrix.config.r }} + - uses: supercharge/redis-github-action@1.7.0 + with: + redis-version: 7 + - uses: r-lib/actions/setup-r-dependencies@v2 with: extra-packages: any::rcmdcheck diff --git a/.github/workflows/r-cmd-check.yml b/.github/workflows/r-cmd-check.yml index d5e5559b4..b980b4706 100644 --- a/.github/workflows/r-cmd-check.yml +++ b/.github/workflows/r-cmd-check.yml @@ -34,6 +34,10 @@ jobs: with: r-version: ${{ matrix.config.r }} + - uses: supercharge/redis-github-action@1.7.0 + with: + redis-version: 7 + - uses: 
r-lib/actions/setup-r-dependencies@v2 with: extra-packages: any::rcmdcheck diff --git a/DESCRIPTION b/DESCRIPTION index 4c0cb9c6b..b40282828 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -47,8 +47,9 @@ Imports: data.table, lgr, methods, - mlr3misc (>= 0.11.0), - R6 + mlr3misc (>= 0.15.0.9000), + R6, + rush Suggests: adagio, emoa, @@ -58,7 +59,11 @@ Suggests: nloptr, progressr, rmarkdown, + redux, testthat (>= 3.0.0) +Remotes: + mlr-org/rush, + mlr-org/mlr3misc VignetteBuilder: knitr Config/testthat/edition: 3 @@ -70,27 +75,39 @@ Roxygen: list(markdown = TRUE) RoxygenNote: 7.3.1 Collate: 'Archive.R' - 'ArchiveBest.R' - 'CallbackOptimization.R' + 'ArchiveAsync.R' + 'ArchiveBatch.R' + 'CallbackAsync.R' + 'CallbackBatch.R' 'Codomain.R' - 'ContextOptimization.R' + 'ContextAsync.R' + 'ContextBatch.R' 'Objective.R' 'ObjectiveRFun.R' 'ObjectiveRFunDt.R' 'ObjectiveRFunMany.R' 'OptimInstance.R' - 'OptimInstanceMultiCrit.R' - 'OptimInstanceSingleCrit.R' + 'OptimInstanceAsync.R' + 'OptimInstanceAsyncMultiCrit.R' + 'OptimInstanceAsyncSingleCrit.R' + 'OptimInstanceBatch.R' + 'OptimInstanceBatchMultiCrit.R' + 'OptimInstanceBatchSingleCrit.R' 'mlr_optimizers.R' 'Optimizer.R' - 'OptimizerCmaes.R' - 'OptimizerDesignPoints.R' - 'OptimizerFocusSearch.R' - 'OptimizerGenSA.R' - 'OptimizerGridSearch.R' - 'OptimizerIrace.R' - 'OptimizerNLoptr.R' - 'OptimizerRandomSearch.R' + 'OptimizerAsync.R' + 'OptimizerAsyncDesignPoints.R' + 'OptimizerAsyncGridSearch.R' + 'OptimizerAsyncRandomSearch.R' + 'OptimizerBatch.R' + 'OptimizerBatchCmaes.R' + 'OptimizerBatchDesignPoints.R' + 'OptimizerBatchFocusSearch.R' + 'OptimizerBatchGenSA.R' + 'OptimizerBatchGridSearch.R' + 'OptimizerBatchIrace.R' + 'OptimizerBatchNLoptr.R' + 'OptimizerBatchRandomSearch.R' 'Progressor.R' 'mlr_terminators.R' 'Terminator.R' @@ -102,6 +119,7 @@ Collate: 'TerminatorRunTime.R' 'TerminatorStagnation.R' 'TerminatorStagnationBatch.R' + 'as_terminator.R' 'assertions.R' 'bb_optimize.R' 'bbotk_reflections.R' @@ -111,4 +129,5 @@ Collate: 'nds_selection.R' 'reexport.R' 'sugar.R' + 'worker_loops.R' 'zzz.R' diff --git a/NAMESPACE b/NAMESPACE index 93177b1c2..6ef7131d4 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,31 +1,47 @@ # Generated by roxygen2: do not edit by hand -S3method(as.data.table,Archive) +S3method(as.data.table,ArchiveAsync) +S3method(as.data.table,ArchiveBatch) S3method(as.data.table,DictionaryOptimizer) S3method(as.data.table,DictionaryTerminator) +S3method(as_terminator,Terminator) +S3method(as_terminators,default) +S3method(as_terminators,list) S3method(bb_optimize,"function") S3method(bb_optimize,Objective) export(Archive) -export(ArchiveBest) -export(CallbackOptimization) +export(ArchiveAsync) +export(ArchiveBatch) +export(CallbackAsync) +export(CallbackBatch) export(Codomain) -export(ContextOptimization) +export(ContextAsync) +export(ContextBatch) export(Objective) export(ObjectiveRFun) export(ObjectiveRFunDt) export(ObjectiveRFunMany) export(OptimInstance) -export(OptimInstanceMultiCrit) -export(OptimInstanceSingleCrit) +export(OptimInstanceAsync) +export(OptimInstanceAsyncMultiCrit) +export(OptimInstanceAsyncSingleCrit) +export(OptimInstanceBatch) +export(OptimInstanceBatchMultiCrit) +export(OptimInstanceBatchSingleCrit) export(Optimizer) -export(OptimizerCmaes) -export(OptimizerDesignPoints) -export(OptimizerFocusSearch) -export(OptimizerGenSA) -export(OptimizerGridSearch) -export(OptimizerIrace) -export(OptimizerNLoptr) -export(OptimizerRandomSearch) +export(OptimizerAsync) +export(OptimizerAsyncDesignPoints) 
+export(OptimizerAsyncGridSearch) +export(OptimizerAsyncRandomSearch) +export(OptimizerBatch) +export(OptimizerBatchCmaes) +export(OptimizerBatchDesignPoints) +export(OptimizerBatchFocusSearch) +export(OptimizerBatchGenSA) +export(OptimizerBatchGridSearch) +export(OptimizerBatchIrace) +export(OptimizerBatchNLoptr) +export(OptimizerBatchRandomSearch) export(Terminator) export(TerminatorClockTime) export(TerminatorCombo) @@ -35,28 +51,46 @@ export(TerminatorPerfReached) export(TerminatorRunTime) export(TerminatorStagnation) export(TerminatorStagnationBatch) +export(as_terminator) +export(as_terminators) +export(assert_archive) +export(assert_archive_async) +export(assert_archive_batch) +export(assert_instance) +export(assert_instance_async) export(assert_instance_properties) export(assert_optimizer) +export(assert_optimizer_async) +export(assert_optimizer_batch) export(assert_set) export(assert_terminable) export(assert_terminator) +export(assert_terminators) export(assign_result_default) export(bb_optimize) export(bbotk_reflections) +export(bbotk_worker_loop) export(branin) export(branin_wu) -export(callback_optimization) +export(callback_async) +export(callback_batch) export(clbk) export(clbks) +export(evaluate_queue_default) export(is_dominated) export(mlr_callbacks) export(mlr_optimizers) export(mlr_terminators) export(nds_selection) +export(oi) +export(oi_async) export(opt) -export(optimize_default) +export(optimize_async_default) +export(optimize_batch_default) export(opts) export(shrink_ps) +export(terminated_error) +export(trafo_xs) export(transform_xdt_to_xss) export(trm) export(trms) @@ -64,6 +98,7 @@ import(checkmate) import(data.table) import(mlr3misc) import(paradox) +import(rush) importFrom(R6,R6Class) importFrom(methods,formalArgs) importFrom(mlr3misc,clbk) diff --git a/NEWS.md b/NEWS.md index 5fb26356d..3c5a79d64 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,5 +1,12 @@ # bbotk (development version) +* feat: Introduce asynchronous optimization with the `OptimizerAsync` and `OptimInstanceAsync` classes. +* BREAKING CHANGE: The `Optimizer` class is `OptimizerBatch` now. +* BREAKING CHANGE: The `OptimInstance` class and subclasses are `OptimInstanceBatch*` now. +* BREAKING CHANGE: The `CallbackOptimization` class is `CallbackBatch` now. +* BREAKING CHANGE: The `ContextOptimization` class is `ContextBatch` now. +* BREAKING CHANGE: Remove `ArchiveBest` class and `keep_evals` parameter from `OptimInstance`. + # bbotk 0.8.0 * fix: `OptimizerIrace` failed with logical parameters and dependencies. @@ -12,7 +19,7 @@ # bbotk 0.7.2 -* fix: Standalone `Tuner` and `FSelector` were rejected by `ContextOptimization`. +* fix: Standalone `Tuner` and `FSelector` were rejected by `ContextBatch`. # bbotk 0.7.1 @@ -35,7 +42,7 @@ * fix: `OptimizerIrace` did not work with parameters with multiple dependencies. * feat: Add new callback that backups the archive to disk to `mlr_callbacks`. -* feat: Create custom callbacks with the `callback_optimization()` function. +* feat: Create custom callbacks with the `callback_batch()` function. # bbotk 0.5.4 diff --git a/R/Archive.R b/R/Archive.R index 3ccda041d..aeff22cd6 100644 --- a/R/Archive.R +++ b/R/Archive.R @@ -1,155 +1,45 @@ -#' @title Logging object for objective function evaluations +#' @title Data Storage #' #' @description -#' Container around a [data.table::data.table] which stores all performed -#' function calls of the Objective. 
+#' The `Archive`` class stores all evaluated points and performance scores #' -#' @section S3 Methods: -#' * `as.data.table(archive)`\cr -#' [Archive] -> [data.table::data.table()]\cr -#' Returns a tabular view of all performed function calls of the -#' Objective. The `x_domain` column is unnested to separate columns. +#' @details +#' The `Archive` is an abstract class that implements the base functionality each archive must provide. +#' +#' @template field_search_space +#' @template field_codomain +#' @template field_start_time #' -#' @template param_codomain #' @template param_search_space -#' @template param_xdt -#' @template param_ydt -#' @template param_n_select -#' @template param_ref_point +#' @template param_codomain +#' #' @export Archive = R6Class("Archive", public = list( - #' @field search_space ([paradox::ParamSet])\cr - #' Search space of objective. search_space = NULL, - #' @field codomain ([Codomain])\cr - #' Codomain of objective function. codomain = NULL, - #' @field start_time ([POSIXct])\cr - #' Time stamp of when the optimization started. The time is set by the - #' [Optimizer]. start_time = NULL, #' @field check_values (`logical(1)`)\cr #' Determines if points and results are checked for validity. check_values = NULL, - #' @field data ([data.table::data.table])\cr - #' Contains all performed [Objective] function calls. - data = NULL, - - #' @field data_extra (named `list`)\cr - #' Data created by specific [`Optimizer`]s that does not relate to any individual function evaluation and can therefore not be held in `$data`. - #' Every optimizer should create and refer to its own entry in this list, named by its `class()`. - data_extra = named_list(), - #' @description #' Creates a new instance of this [R6][R6::R6Class] class. #' #' @param check_values (`logical(1)`)\cr #' Should x-values that are added to the archive be checked for validity? #' Search space that is logged into archive. - initialize = function(search_space, codomain, check_values = TRUE) { + initialize = function(search_space, codomain, check_values = FALSE) { self$search_space = assert_param_set(search_space) assert_param_set(codomain) # get "codomain" element if present (new paradox) or default to $params (old paradox) params = get0("domains", codomain, ifnotfound = codomain$params) self$codomain = Codomain$new(params) self$check_values = assert_flag(check_values) - self$data = data.table() - }, - - #' @description - #' Adds function evaluations to the archive table. - #' - #' @param xss_trafoed (`list()`)\cr - #' Transformed point(s) in the *domain space*. - add_evals = function(xdt, xss_trafoed = NULL, ydt) { - assert_data_table(xdt) - assert_data_table(ydt) - assert_list(xss_trafoed, null.ok = TRUE) - assert_data_table(ydt[, self$cols_y, with = FALSE], any.missing = FALSE) - if (self$check_values) { - self$search_space$assert_dt(xdt[, self$cols_x, with = FALSE]) - } - xydt = cbind(xdt, ydt) - assert_subset(c(self$search_space$ids(), self$codomain$ids()), colnames(xydt)) - if (!is.null(xss_trafoed)) set(xydt, j = "x_domain", value = list(xss_trafoed)) - set(xydt, j = "timestamp", value = Sys.time()) - batch_nr = self$data$batch_nr - set(xydt, j = "batch_nr", value = if (length(batch_nr)) max(batch_nr) + 1L else 1L) - self$data = rbindlist(list(self$data, xydt), fill = TRUE, use.names = TRUE) - }, - - #' @description - #' Returns the best scoring evaluation(s). - #' For single-crit optimization, the solution that minimizes / maximizes the objective function. 
- #' For multi-crit optimization, the Pareto set / front. - #' - #' @param batch (`integer()`)\cr - #' The batch number(s) to limit the best results to. - #' Default is all batches. - #' @param n_select (`integer(1L)`)\cr - #' Amount of points to select. - #' Ignored for multi-crit optimization. - #' @param ties_method (`character(1L)`)\cr - #' Method to break ties when multiple points have the same score. - #' Either `"first"` (default) or `"random"`. - #' Ignored for multi-crit optimization. - #' If `n_select > 1L`, the tie method is ignored and the first point is returned. - #' - #' @return [data.table::data.table()] - best = function(batch = NULL, n_select = 1L, ties_method = "first") { - if (!self$n_batch) return(data.table()) - assert_subset(batch, seq_len(self$n_batch)) - assert_int(n_select, lower = 1L) - assert_choice(ties_method, c("first", "random")) - - tab = if (is.null(batch)) self$data else self$data[list(batch), , on = "batch_nr"] - - if (self$codomain$target_length == 1L) { - if (n_select == 1L) { - # use which_max to find the best point - y = tab[[self$cols_y]] * -self$codomain$maximization_to_minimization - ii = which_max(y, ties_method = ties_method) - tab[ii] - } else { - # copy table to avoid changing the order of the archive - if (is.null(batch)) tab = copy(self$data) - # use data.table fast sort to find the best points - setorderv(tab, cols = self$cols_y, order = self$codomain$maximization_to_minimization) - head(tab, n_select) - } - } else { - # use non-dominated sorting to find the best points - ymat = t(as.matrix(tab[, self$cols_y, with = FALSE])) - ymat = self$codomain$maximization_to_minimization * ymat - tab[!is_dominated(ymat)] - } - }, - - #' @description - #' Calculate best points w.r.t. non dominated sorting with hypervolume contribution. - #' - #' @param batch (`integer()`)\cr - #' The batch number(s) to limit the best points to. Default is - #' all batches. - #' - #' @return [data.table::data.table()] - nds_selection = function(batch = NULL, n_select = 1, ref_point = NULL) { - if (!self$n_batch) return(data.table()) - assert_subset(batch, seq_len(self$n_batch)) - - tab = if (is.null(batch)) self$data else self$data[list(batch), , on = "batch_nr"] - assert_int(n_select, lower = 1L, upper = nrow(tab)) - - points = t(as.matrix(tab[, self$cols_y, with = FALSE])) - minimize = map_lgl(self$codomain$target_tags, has_element, "minimize") - ii = nds_selection(points, n_select, ref_point, minimize) - tab[ii, ] }, #' @description @@ -171,27 +61,13 @@ Archive = R6Class("Archive", #' @description #' Clear all evaluation results from archive. clear = function() { - self$data = data.table() self$start_time = NULL + invisible(self) } ), active = list( - #' @field n_evals (`integer(1)`)\cr - #' Number of evaluations stored in the archive. - n_evals = function() nrow(self$data), - - #' @field n_batch (`integer(1)`)\cr - #' Number of batches stored in the archive. - n_batch = function() { - if (is.null(self$data$batch_nr)) { - 0L - } else { - max(self$data$batch_nr) - } - }, - #' @field cols_x (`character()`)\cr #' Column names of search space parameters. cols_x = function() self$search_space$ids(), @@ -199,27 +75,6 @@ Archive = R6Class("Archive", #' @field cols_y (`character()`)\cr #' Column names of codomain target parameters. 
cols_y = function() self$codomain$target_ids - ), - - private = list( - .data = NULL, - - deep_clone = function(name, value) { - switch(name, - search_space = value$clone(deep = TRUE), - codomain = value$clone(deep = TRUE), - data = copy(value), - value - ) - } ) ) -#' @export -as.data.table.Archive = function(x, ...) { # nolint - if (is.null(x$data$x_domain) || !nrow(x$data)) { - copy(x$data) - } else { - unnest(copy(x$data), "x_domain", prefix = "{col}_") - } -} diff --git a/R/ArchiveAsync.R b/R/ArchiveAsync.R new file mode 100644 index 000000000..d7c983bdb --- /dev/null +++ b/R/ArchiveAsync.R @@ -0,0 +1,248 @@ +#' @title Rush Data Storage +#' +#' @description +#' The `ArchiveAsync` stores all evaluated points and performance scores in a [rush::Rush] data base. +#' +#' @section S3 Methods: +#' * `as.data.table(archive)`\cr +#' [ArchiveAsync] -> [data.table::data.table()]\cr +#' Returns a tabular view of all performed function calls of the Objective. +#' The `x_domain` column is unnested to separate columns. +#' +#' @template param_search_space +#' @template param_codomain +#' @template param_check_values +#' @template param_rush +#' +#' @template field_rush +#' +#' @export +ArchiveAsync = R6Class("ArchiveAsync", + inherit = Archive, + cloneable = FALSE, + public = list( + + rush = NULL, + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function(search_space, codomain, check_values = FALSE, rush) { + self$rush = assert_rush(rush) + + super$initialize( + search_space = search_space, + codomain = codomain, + check_values = check_values) + }, + + #' @description + #' Push queued points to the archive. + #' + #' @param xss (list of named `list()`)\cr + #' List of named lists of point values. + push_points = function(xss) { + if (self$check_values) map(xss, self$search_space$assert) + self$rush$push_tasks(xss, extra = list(list(timestamp_xs = Sys.time()))) + }, + + #' @description + #' Pop a point from the queue. + pop_point = function() { + self$rush$pop_task(fields = "xs") + }, + + #' @description + #' Push running point to the archive. + #' + #' @param xs (named `list`)\cr + #' Named list of point values. + #' @param extra (`list()`)\cr + #' Named list of additional information. + push_running_point = function(xs, extra = NULL) { + if (self$check_values) self$search_space$assert(xs) + extra = c(list(timestamp_xs = Sys.time()), extra) + self$rush$push_running_tasks(list(xs), extra = list(extra)) + }, + + #' @description + #' Push result to the archive. + #' + #' @param key (`character()`)\cr + #' Key of the point. + #' @param ys (`list()`)\cr + #' Named list of results. + #' @param x_domain (`list()`)\cr + #' Named list of transformed point values. + #' @param extra (`list()`)\cr + #' Named list of additional information. + push_result = function(key, ys, x_domain, extra = NULL) { + extra = c(list(x_domain = list(x_domain), timestamp_ys = Sys.time()), extra) + self$rush$push_results(key, list(ys), extra = list(extra)) + }, + + #' @description + #' Push failed point to the archive. + #' + #' @param key (`character()`)\cr + #' Key of the point. + #' @param message (`character()`)\cr + #' Error message. + push_failed_point = function(key, message) { + self$rush$push_failed(key, list(list(message = message))) + }, + + #' @description + #' Fetch points with a specific state. + #' + #' @param fields (`character()`)\cr + #' Fields to fetch. + #' Defaults to `c("xs", "ys", "xs_extra", "worker_extra", "ys_extra")`. 
+ #' @param states (`character()`)\cr + #' States of the tasks to be fetched. + #' Defaults to `c("queued", "running", "finished", "failed")`. + #' @param reset_cache (`logical(1)`)\cr + #' Whether to reset the cache of the finished points. + data_with_state = function( + fields = c("xs", "ys", "xs_extra", "worker_extra", "ys_extra", "condition"), + states = c("queued", "running", "finished", "failed"), + reset_cache = FALSE + ) { + self$rush$fetch_tasks_with_state(fields, states, reset_cache) + }, + + #' @description + #' Returns the best scoring evaluation(s). + #' For single-crit optimization, the solution that minimizes / maximizes the objective function. + #' For multi-crit optimization, the Pareto set / front. + #' + #' @param n_select (`integer(1L)`)\cr + #' Amount of points to select. + #' Ignored for multi-crit optimization. + #' @param ties_method (`character(1L)`)\cr + #' Method to break ties when multiple points have the same score. + #' Either `"first"` (default) or `"random"`. + #' Ignored for multi-crit optimization. + #' If `n_select > 1L`, the tie method is ignored and the first point is returned. + #' + #' @return [data.table::data.table()] + best = function(n_select = 1, ties_method = "first") { + assert_count(n_select) + tab = self$data + + if (self$codomain$target_length == 1L) { + if (n_select == 1L) { + # use which_max to find the best point + y = tab[[self$cols_y]] * -self$codomain$maximization_to_minimization + ii = which_max(y, ties_method = ties_method) + tab[ii] + } else { + # copy table to avoid changing the order of the archive + tab = copy(self$data) + # use data.table fast sort to find the best points + setorderv(tab, cols = self$cols_y, order = self$codomain$maximization_to_minimization) + head(tab, n_select) + } + } else { + # use non-dominated sorting to find the best points + ymat = t(as.matrix(tab[, self$cols_y, with = FALSE])) + ymat = self$codomain$maximization_to_minimization * ymat + tab[!is_dominated(ymat)] + } + }, + + #' @description + #' Calculate best points w.r.t. non dominated sorting with hypervolume contribution. + #' + #' @template param_n_select + #' @template param_ref_point + #' + #' @return [data.table::data.table()] + nds_selection = function(n_select = 1, ref_point = NULL) { + tab = self$data + assert_int(n_select, lower = 1L, upper = nrow(tab)) + + points = t(as.matrix(tab[, self$cols_y, with = FALSE])) + minimize = map_lgl(self$codomain$target_tags, has_element, "minimize") + inds = nds_selection(points, n_select, ref_point, minimize) + tab[inds, ] + }, + + #' @description + #' Clear all evaluation results from archive. + clear = function() { + self$rush$reset() + super$clear() + } + ), + + active = list( + + #' @field data ([data.table::data.table])\cr + #' Data table with all finished points. + data = function(rhs) { + assert_ro_binding(rhs) + self$data_with_state() + }, + + #' @field queued_data ([data.table::data.table])\cr + #' Data table with all queued points. + queued_data = function() { + self$rush$fetch_queued_tasks() + }, + + #' @field running_data ([data.table::data.table])\cr + #' Data table with all running points. + running_data = function() { + self$rush$fetch_running_tasks() + }, + + #' @field finished_data ([data.table::data.table])\cr + #' Data table with all finished points. + finished_data = function() { + self$rush$fetch_finished_tasks() + }, + + #' @field failed_data ([data.table::data.table])\cr + #' Data table with all failed points. 
+ failed_data = function() { + self$rush$fetch_failed_tasks() + }, + + #' @field n_queued (`integer(1)`)\cr + #' Number of queued points. + n_queued = function() { + self$rush$n_queued_tasks + }, + + #' @field n_running (`integer(1)`)\cr + #' Number of running points. + n_running = function() { + self$rush$n_running_tasks + }, + + #' @field n_finished (`integer(1)`)\cr + #' Number of finished points. + n_finished = function() { + self$rush$n_finished_tasks + }, + + #' @field n_failed (`integer(1)`)\cr + #' Number of failed points. + n_failed = function() { + self$rush$n_failed_tasks + }, + + #' @field n_evals (`integer(1)`)\cr + #' Number of evaluations stored in the archive. + n_evals = function() { + self$rush$n_finished_tasks + self$rush$n_failed_tasks + } + ) +) + +#' @export +as.data.table.ArchiveAsync = function(x, keep.rownames = FALSE, unnest = "x_domain", ...) { # nolint + data = x$data_with_state() + cols = intersect(unnest, names(data)) + unnest(data, cols, prefix = "{col}_") +} diff --git a/R/ArchiveBatch.R b/R/ArchiveBatch.R new file mode 100644 index 000000000..e7bd29f92 --- /dev/null +++ b/R/ArchiveBatch.R @@ -0,0 +1,181 @@ +#' @title Data Table Storage +#' +#' @description +#' The `ArchiveBatch` stores all evaluated points and performance scores in a [data.table::data.table()]. +#' +#' @section S3 Methods: +#' * `as.data.table(archive)`\cr +#' [ArchiveBatch] -> [data.table::data.table()]\cr +#' Returns a tabular view of all performed function calls of the Objective. +#' The `x_domain` column is unnested to separate columns. +#' +#' @template param_codomain +#' @template param_search_space +#' @template param_xdt +#' @template param_ydt +#' @template param_n_select +#' @template param_ref_point +#' +#' @export +ArchiveBatch = R6Class("ArchiveBatch", + inherit = Archive, + public = list( + + #' @field data ([data.table::data.table])\cr + #' Contains all performed [Objective] function calls. + data = NULL, + + #' @field data_extra (named `list`)\cr + #' Data created by specific [`Optimizer`]s that does not relate to any individual function evaluation and can therefore not be held in `$data`. + #' Every optimizer should create and refer to its own entry in this list, named by its `class()`. + data_extra = named_list(), + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + #' + #' @param check_values (`logical(1)`)\cr + #' Should x-values that are added to the archive be checked for validity? + #' Search space that is logged into archive. + initialize = function(search_space, codomain, check_values = FALSE) { + super$initialize( + search_space = search_space, + codomain = codomain, + check_values = check_values) # FIXME: not implemented yet + self$data = data.table() + }, + + #' @description + #' Adds function evaluations to the archive table. + #' + #' @param xss_trafoed (`list()`)\cr + #' Transformed point(s) in the *domain space*. 
+ add_evals = function(xdt, xss_trafoed = NULL, ydt) { + assert_data_table(xdt) + assert_data_table(ydt) + assert_list(xss_trafoed, null.ok = TRUE) + assert_data_table(ydt[, self$cols_y, with = FALSE], any.missing = FALSE) + if (self$check_values) { + self$search_space$assert_dt(xdt[, self$cols_x, with = FALSE]) + } + xydt = cbind(xdt, ydt) + assert_subset(c(self$search_space$ids(), self$codomain$ids()), colnames(xydt)) + if (!is.null(xss_trafoed)) set(xydt, j = "x_domain", value = list(xss_trafoed)) + set(xydt, j = "timestamp", value = Sys.time()) + batch_nr = self$data$batch_nr + set(xydt, j = "batch_nr", value = if (length(batch_nr)) max(batch_nr) + 1L else 1L) + self$data = rbindlist(list(self$data, xydt), fill = TRUE, use.names = TRUE) + }, + + #' @description + #' Returns the best scoring evaluation(s). + #' For single-crit optimization, the solution that minimizes / maximizes the objective function. + #' For multi-crit optimization, the Pareto set / front. + #' + #' @param batch (`integer()`)\cr + #' The batch number(s) to limit the best results to. + #' Default is all batches. + #' @param n_select (`integer(1L)`)\cr + #' Amount of points to select. + #' Ignored for multi-crit optimization. + #' @param ties_method (`character(1L)`)\cr + #' Method to break ties when multiple points have the same score. + #' Either `"first"` (default) or `"random"`. + #' Ignored for multi-crit optimization. + #' If `n_select > 1L`, the tie method is ignored and the first point is returned. + #' + #' @return [data.table::data.table()] + best = function(batch = NULL, n_select = 1L, ties_method = "first") { + if (!self$n_batch) return(data.table()) + assert_subset(batch, seq_len(self$n_batch)) + assert_int(n_select, lower = 1L) + assert_choice(ties_method, c("first", "random")) + + tab = if (is.null(batch)) self$data else self$data[list(batch), , on = "batch_nr"] + + if (self$codomain$target_length == 1L) { + if (n_select == 1L) { + # use which_max to find the best point + y = tab[[self$cols_y]] * -self$codomain$maximization_to_minimization + ii = which_max(y, ties_method = ties_method) + tab[ii] + } else { + # copy table to avoid changing the order of the archive + if (is.null(batch)) tab = copy(self$data) + # use data.table fast sort to find the best points + setorderv(tab, cols = self$cols_y, order = self$codomain$maximization_to_minimization) + head(tab, n_select) + } + } else { + # use non-dominated sorting to find the best points + ymat = t(as.matrix(tab[, self$cols_y, with = FALSE])) + ymat = self$codomain$maximization_to_minimization * ymat + tab[!is_dominated(ymat)] + } + }, + + #' @description + #' Calculate best points w.r.t. non dominated sorting with hypervolume contribution. + #' + #' @param batch (`integer()`)\cr + #' The batch number(s) to limit the best points to. Default is + #' all batches. + #' + #' @return [data.table::data.table()] + nds_selection = function(batch = NULL, n_select = 1, ref_point = NULL) { + if (!self$n_batch) return(data.table()) + assert_subset(batch, seq_len(self$n_batch)) + + tab = if (is.null(batch)) self$data else self$data[list(batch), , on = "batch_nr"] + assert_int(n_select, lower = 1L, upper = nrow(tab)) + + points = t(as.matrix(tab[, self$cols_y, with = FALSE])) + minimize = map_lgl(self$codomain$target_tags, has_element, "minimize") + ii = nds_selection(points, n_select, ref_point, minimize) + tab[ii, ] + }, + + #' @description + #' Clear all evaluation results from archive. 
+ clear = function() { + self$data = data.table() + super$clear() + } + ), + + active = list( + + #' @field n_evals (`integer(1)`)\cr + #' Number of evaluations stored in the archive. + n_evals = function() nrow(self$data), + + #' @field n_batch (`integer(1)`)\cr + #' Number of batches stored in the archive. + n_batch = function() { + if (is.null(self$data$batch_nr)) { + 0L + } else { + max(self$data$batch_nr) + } + } + ), + + private = list( + .data = NULL, + + deep_clone = function(name, value) { + switch(name, + search_space = value$clone(deep = TRUE), + codomain = value$clone(deep = TRUE), + data = copy(value), + value + ) + } + ) +) + +#' @export +as.data.table.ArchiveBatch = function(x, keep.rownames = FALSE, unnest = "x_domain", ...) { # nolint + data = copy(x$data) + cols = intersect(unnest, names(data)) + unnest(data, cols, prefix = "{col}_") +} diff --git a/R/ArchiveBest.R b/R/ArchiveBest.R deleted file mode 100644 index 9d1f76ca9..000000000 --- a/R/ArchiveBest.R +++ /dev/null @@ -1,105 +0,0 @@ -#' @title Minimal logging object for objective function evaluations -#' -#' @description -#' The [ArchiveBest] stores no data but records the best scoring evaluation -#' passed to `$add_evals()`. The [Archive] API is fully implemented but many -#' parameters are ignored and some methods do nothing. The archive still works -#' with [TerminatorClockTime], [TerminatorEvals], [TerminatorNone] and -#' [TerminatorEvals]. -#' -#' @template param_codomain -#' @template param_search_space -#' @template param_xdt -#' @template param_ydt -#' @export -ArchiveBest = R6Class("ArchiveBest", - inherit = Archive, - public = list( - - #' @description - #' Creates a new instance of this [R6][R6::R6Class] class. - #' - #' @param check_values (`logical(1)`)\cr - #' ignored. - initialize = function(search_space, codomain, check_values = FALSE) { - super$initialize(search_space, codomain, check_values = check_values) - private$.max_to_min = self$codomain$maximization_to_minimization - if (self$codomain$length == 1) private$.best_y = if (private$.max_to_min == -1) -Inf else Inf - }, - - #' @description - #' Stores the best result in `ydt`. - #' - #' @param xss_trafoed (`list()`)\cr - #' Transformed point(s) in the *domain space*. - add_evals = function(xdt, xss_trafoed = NULL, ydt) { - private$.n_evals = private$.n_evals + nrow(xdt) - - if (self$codomain$length == 1) { - y = ydt[[1]] * private$.max_to_min - id = which_min(y) - if (y[id] < private$.best_y * private$.max_to_min) { - private$.best_y = ydt[id, ] - private$.best_x = xdt[id, ] - private$.best_x_trafoed = if (!is.null(xss_trafoed)) xss_trafoed[id] - } - } else { - y = rbindlist(list(ydt, private$.best_y)) - x = rbindlist(list(xdt, private$.best_x)) - - ymat = t(as.matrix(y)) - ymat = private$.max_to_min * ymat - id = !is_dominated(ymat) - - private$.best_y = y[id, ] - private$.best_x = x[id, ] - private$.best_x_trafoed = if (!is.null(xss_trafoed)) xss_trafoed[id] - } - }, - - #' @description - #' Returns the best scoring evaluation. For single-crit optimization, - #' the solution that minimizes / maximizes the objective function. - #' For multi-crit optimization, the Pareto set / front. - #' - #' @param m (`integer()`)\cr - #' ignored. - #' - #' @return [data.table::data.table()] - best = function(m = NULL) { - if (self$n_evals == 0) { - stop("No results stored in archive") - } else { - cbind(private$.best_x, private$.best_y) - } - } - ), - - active = list( - - #' @field n_evals (`integer(1)`)\cr - #' Number of evaluations stored in the archive. 
- n_evals = function() private$.n_evals, - - #' @field n_batch (`integer(1)`)\cr - #' Number of batches stored in the archive. - n_batch = function() 1 - ), - - private = list( - # Is increased by $add_evals() - .n_evals = 0, - - # Stores best x - .best_x = NULL, - - # Stores best x trafoed - .best_x_trafoed = NULL, - - # Stores best y - .best_y = NULL, - - # Stores max to min vector - .max_to_min = NULL - ) -) diff --git a/R/CallbackAsync.R b/R/CallbackAsync.R new file mode 100644 index 000000000..05a396942 --- /dev/null +++ b/R/CallbackAsync.R @@ -0,0 +1,123 @@ +#' @title Create Asynchronous Optimization Callback +#' +#' @description +#' Specialized [mlr3misc::Callback] for asynchronous optimization. +#' Callbacks allow to customize the behavior of processes in bbotk. +#' The [callback_async()] function creates a [CallbackAsync]. +#' Predefined callbacks are stored in the [dictionary][mlr3misc::Dictionary] [mlr_callbacks] and can be retrieved with [clbk()]. +#' For more information on optimization callbacks see [callback_async()]. +#' +#' @export +CallbackAsync = R6Class("CallbackAsync", + inherit = Callback, + public = list( + + #' @field on_optimization_begin (`function()`)\cr + #' Stage called at the beginning of the optimization in the main process. + #' Called in `Optimizer$optimize()`. + on_optimization_begin = NULL, + + #' @field on_worker_begin (`function()`)\cr + #' Stage called at the beginning of the optimization on the worker. + #' Called in the worker loop. + on_worker_begin = NULL, + + #' @field on_worker_end (`function()`)\cr + #' Stage called at the end of the optimization on the worker. + #' Called in the worker loop. + on_worker_end = NULL, + + #' @field on_result (`function()`)\cr + #' Stage called after result are written. + #' Called in `OptimInstance$assign_result()`. + on_result = NULL, + + #' @field on_optimization_end (`function()`)\cr + #' Stage called at the end of the optimization in the main process. + #' Called in `Optimizer$optimize()`. + on_optimization_end = NULL + ) +) + +#' @title Create Asynchronous Optimization Callback +#' +#' @description +#' Function to create a [CallbackAsync]. +#' +#' Optimization callbacks can be called from different stages of optimization process. +#' The stages are prefixed with `on_*`. +#' +#' ``` +#' Start Optimization +#' - on_optimization_begin +#' Start Worker +#' - on_worker_begin +#' - on_worker_end +#' End Worker +#' - on_result +#' - on_optimization_end +#' End Optimization +#' ``` +#' +#' See also the section on parameters for more information on the stages. +#' A optimization callback works with [ContextAsync]. +#' +#' @details +#' A callback can write data to its state (`$state`), e.g. settings that affect the callback itself. +#' The [ContextAsync] allows to modify the instance, archive, optimizer and final result. +#' +#' @param id (`character(1)`)\cr +#' Identifier for the new instance. +#' @param label (`character(1)`)\cr +#' Label for the new instance. +#' @param man (`character(1)`)\cr +#' String in the format `[pkg]::[topic]` pointing to a manual page for this object. +#' The referenced help package can be opened via method `$help()`. +#' @param on_optimization_begin (`function()`)\cr +#' Stage called at the beginning of the optimization in the main process. +#' Called in `Optimizer$optimize()`. +#' The functions must have two arguments named `callback` and `context`. +#' @param on_worker_begin (`function()`)\cr +#' Stage called at the beginning of the optimization on the worker. 
+#' Called in the worker loop. +#' The functions must have two arguments named `callback` and `context`. +#' @param on_worker_end (`function()`)\cr +#' Stage called at the end of the optimization on the worker. +#' Called in the worker loop. +#' The functions must have two arguments named `callback` and `context`. +#' @param on_result (`function()`)\cr +#' Stage called after result are written. +#' Called in `OptimInstance$assign_result()`. +#' The functions must have two arguments named `callback` and `context`. +#' @param on_optimization_end (`function()`)\cr +#' Stage called at the end of the optimization in the main process. +#' Called in `Optimizer$optimize()`. +#' The functions must have two arguments named `callback` and `context`. +#' +#' @export +callback_async = function( + id, + label = NA_character_, + man = NA_character_, + on_optimization_begin = NULL, + on_worker_begin = NULL, + on_worker_end = NULL, + on_result = NULL, + on_optimization_end = NULL + ) { + stages = discard(set_names(list( + on_optimization_begin, + on_worker_begin, + on_worker_end, + on_result, + on_optimization_end), + c("on_optimization_begin", + "on_worker_begin", + "on_worker_end", + "on_result", + "on_optimization_end")), is.null) + walk(stages, function(stage) assert_function(stage, args = c("callback", "context"))) + callback = CallbackAsync$new(id, label, man) + iwalk(stages, function(stage, name) callback[[name]] = stage) + callback +} diff --git a/R/CallbackOptimization.R b/R/CallbackBatch.R similarity index 76% rename from R/CallbackOptimization.R rename to R/CallbackBatch.R index 30400d615..26f69586b 100644 --- a/R/CallbackOptimization.R +++ b/R/CallbackBatch.R @@ -1,21 +1,21 @@ -#' @title Create Optimization Callback +#' @title Create Batch Optimization Callback #' #' @description -#' Specialized [mlr3misc::Callback] for optimization. +#' Specialized [mlr3misc::Callback] for batch optimization. #' Callbacks allow to customize the behavior of processes in bbotk. -#' The [callback_optimization()] function creates a [CallbackOptimization]. +#' The [callback_batch()] function creates a [CallbackBatch]. #' Predefined callbacks are stored in the [dictionary][mlr3misc::Dictionary] [mlr_callbacks] and can be retrieved with [clbk()]. -#' For more information on optimization callbacks see [callback_optimization()]. +#' For more information on optimization callbacks see [callback_batch()]. #' #' @export #' @examples #' # write archive to disk -#' callback_optimization("bbotk.backup", +#' callback_batch("bbotk.backup", #' on_optimization_end = function(callback, context) { #' saveRDS(context$instance$archive, "archive.rds") #' } #' ) -CallbackOptimization = R6Class("CallbackOptimization", +CallbackBatch = R6Class("CallbackBatch", inherit = Callback, public = list( @@ -46,10 +46,10 @@ CallbackOptimization = R6Class("CallbackOptimization", ) ) -#' @title Create Optimization Callback +#' @title Create Batch Optimization Callback #' #' @description -#' Function to create a [CallbackOptimization]. +#' Function to create a [CallbackBatch]. #' #' Optimization callbacks can be called from different stages of optimization process. #' The stages are prefixed with `on_*`. @@ -67,11 +67,11 @@ CallbackOptimization = R6Class("CallbackOptimization", #' ``` #' #' See also the section on parameters for more information on the stages. -#' A optimization callback works with [ContextOptimization]. +#' A optimization callback works with [ContextBatch]. #' #' @details #' A callback can write data to its state (`$state`), e.g. 
settings that affect the callback itself. -#' The [ContextOptimization] allows to modify the instance, archive, optimizer and final result. +#' The [ContextBatch] allows to modify the instance, archive, optimizer and final result. #' #' #' @param id (`character(1)`)\cr @@ -105,11 +105,32 @@ CallbackOptimization = R6Class("CallbackOptimization", #' List of additional fields. #' #' @export -#' @inherit CallbackOptimization examples -callback_optimization = function(id, label = NA_character_, man = NA_character_, on_optimization_begin = NULL, on_optimizer_before_eval = NULL, on_optimizer_after_eval = NULL, on_result = NULL, on_optimization_end = NULL, fields = list()) { - stages = discard(set_names(list(on_optimization_begin, on_optimizer_before_eval, on_optimizer_after_eval, on_result, on_optimization_end), c("on_optimization_begin", "on_optimizer_before_eval", "on_optimizer_after_eval", "on_result", "on_optimization_end")), is.null) +#' @inherit CallbackBatch examples +callback_batch = function( + id, + label = NA_character_, + man = NA_character_, + on_optimization_begin = NULL, + on_optimizer_before_eval = NULL, + on_optimizer_after_eval = NULL, + on_result = NULL, + on_optimization_end = NULL, + fields = list() + ) { + stages = discard(set_names(list( + on_optimization_begin, + on_optimizer_before_eval, + on_optimizer_after_eval, + on_result, + on_optimization_end), + c( + "on_optimization_begin", + "on_optimizer_before_eval", + "on_optimizer_after_eval", + "on_result", + "on_optimization_end")), is.null) walk(stages, function(stage) assert_function(stage, args = c("callback", "context"))) - callback = CallbackOptimization$new(id, label, man) + callback = CallbackBatch$new(id, label, man) iwalk(stages, function(stage, name) callback[[name]] = stage) callback } diff --git a/R/ContextAsync.R b/R/ContextAsync.R new file mode 100644 index 000000000..814cca35c --- /dev/null +++ b/R/ContextAsync.R @@ -0,0 +1,45 @@ +#' @title Asynchronous Optimization Context +#' +#' @description +#' A [CallbackAsync] accesses and modifies data during the optimization via the `ContextAsync`. +#' See the section on active bindings for a list of modifiable objects. +#' See [callback_async()] for a list of stages which access `ContextAsync`. +#' +#' @details +#' Changes to `$instance` and `$optimizer` in the stages executed on the workers are not reflected in the main process. +#' +#' @export +ContextAsync = R6Class("ContextAsync", + inherit = Context, + public = list( + + #' @field instance ([OptimInstance]). + instance = NULL, + + #' @field optimizer ([Optimizer]). + optimizer = NULL, + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + #' + #' @param inst ([OptimInstance]). + #' @param optimizer ([Optimizer]). + initialize = function(inst, optimizer) { + self$instance = assert_instance_async(inst) + self$optimizer = optimizer + } + ), + + active = list( + + #' @field result ([data.table::data.table])\cr + #' The result of the optimization. 
+ result = function(rhs) { + if (missing(rhs)) { + get_private(self$instance)$.result + } else { + get_private(self$instance, ".result") = rhs + } + } + ) +) diff --git a/R/ContextOptimization.R b/R/ContextBatch.R similarity index 67% rename from R/ContextOptimization.R rename to R/ContextBatch.R index 6c4b7ffe9..a6e99babb 100644 --- a/R/ContextOptimization.R +++ b/R/ContextBatch.R @@ -1,12 +1,12 @@ -#' @title Optimization Context +#' @title Batch Optimization Context #' #' @description -#' The [ContextOptimization] allows [mlr3misc::Callback]s to access and modify data while optimization. -#' See section on active bindings for a list of modifiable objects. -#' See [callback_optimization()] for a list of stages which access [ContextOptimization]. +#' A [CallbackBatch] accesses and modifies data during the optimization via the `ContextBatch`. +#' See the section on active bindings for a list of modifiable objects. +#' See [callback_batch()] for a list of stages which that `ContextBatch`. #' #' @export -ContextOptimization = R6Class("ContextOptimization", +ContextBatch = R6Class("ContextBatch", inherit = Context, public = list( @@ -19,10 +19,10 @@ ContextOptimization = R6Class("ContextOptimization", #' @description #' Creates a new instance of this [R6][R6::R6Class] class. #' - #' @param instance ([OptimInstance]). + #' @param inst ([OptimInstance]). #' @param optimizer ([Optimizer]). - initialize = function(instance, optimizer) { - self$instance = assert_class(instance, "OptimInstance") + initialize = function(inst, optimizer) { + self$instance = assert_instance_batch(inst) self$optimizer = optimizer } ), diff --git a/R/Objective.R b/R/Objective.R index 7d3bdb5bb..5dab19ddb 100644 --- a/R/Objective.R +++ b/R/Objective.R @@ -1,12 +1,13 @@ -#' @title Objective function with domain and co-domain +#' @title Objective Function with Domain and Codomain #' #' @description -#' Describes a black-box objective function that maps an arbitrary domain to a -#' numerical codomain. +#' The `Objective` class describes a black-box objective function that maps an arbitrary domain to a numerical codomain. #' -#' @section Technical details: -#' `Objective` objects can have the following properties: `"noisy"`, -#' `"deterministic"`, `"single-crit"` and `"multi-crit"`. +#' @details +#' `Objective` objects can have the following properties: `"noisy"`, `"deterministic"`, `"single-crit"` and `"multi-crit"`. +#' +#' @template field_callbacks +#' @template field_context #' #' @template param_domain #' @template param_codomain @@ -41,13 +42,23 @@ Objective = R6Class("Objective", #' @field check_values (`logical(1)`)\cr check_values = NULL, + callbacks = NULL, + + context = NULL, + #' @description #' Creates a new instance of this [R6][R6::R6Class] class. #' #' @param id (`character(1)`). #' @param properties (`character()`). 
- initialize = function(id = "f", properties = character(), domain, codomain = ps(y = p_dbl(tags = "minimize")), - constants = ps(), check_values = TRUE) { + initialize = function( + id = "f", + properties = character(), + domain, + codomain = ps(y = p_dbl(tags = "minimize")), + constants = ps(), + check_values = TRUE + ) { self$id = assert_string(id) self$domain = assert_param_set(domain) assert_param_set(codomain) @@ -163,6 +174,8 @@ Objective = R6Class("Objective", }, deep_clone = function(name, value) { + if (name == "context") return(NULL) + if (!is.environment(value)) return(value) switch(name, domain = value$clone(deep = TRUE), codomain = value$clone(deep = TRUE), diff --git a/R/OptimInstance.R b/R/OptimInstance.R index 39a318028..18aae2c5e 100644 --- a/R/OptimInstance.R +++ b/R/OptimInstance.R @@ -1,89 +1,60 @@ -#' @title Optimization Instance with budget and archive +#' @title Optimization Instance #' #' @description -#' Abstract base class. +#' The `OptimInstance` specifies an optimization problem for an [Optimizer]. #' -#' @section Technical details: -#' The [Optimizer] writes the final result to the `.result` field by using -#' the `$assign_result()` method. `.result` stores a [data.table::data.table] -#' consisting of x values in the *search space*, (transformed) x values in the -#' *domain space* and y values in the *codomain space* of the [Objective]. The -#' user can access the results with active bindings (see below). +#' @details +#' `OptimInstance` is an abstract base class that implements the base functionality each instance must provide. +#' The [Optimizer] writes the final result to the `.result` field by using the `$assign_result()` method. +#' `.result` stores a [data.table::data.table] consisting of x values in the *search space*, (transformed) x values in the *domain space* and y values in the *codomain space* of the [Objective]. +#' The user can access the results with active bindings (see below). #' -#' @template param_xdt +#' @template param_objective #' @template param_search_space -#' @template param_keep_evals +#' @template param_terminator +#' @template param_check_values #' @template param_callbacks +#' @template param_archive +#' +#' @template param_xdt +#' +#' @template field_objective +#' @template field_search_space +#' @template field_terminator +#' @template field_archive +#' @template field_progressor +#' +#' #' @export OptimInstance = R6Class("OptimInstance", public = list( - #' @field objective ([Objective]). objective = NULL, - #' @field search_space ([paradox::ParamSet]). search_space = NULL, - #' @field terminator ([Terminator]). terminator = NULL, - #' @field archive ([Archive]). archive = NULL, - #' @field progressor (`progressor()`)\cr - #' Stores `progressor` function. progressor = NULL, - #' @field objective_multiplicator (`integer()`). - objective_multiplicator = NULL, - - #' @field callbacks (List of [CallbackOptimization]s). - callbacks = NULL, - #' @description #' Creates a new instance of this [R6][R6::R6Class] class. - #' - #' @param objective ([Objective]). - #' @param terminator ([Terminator]). - #' @param check_values (`logical(1)`)\cr - #' Should x-values that are added to the archive be checked for validity? - #' Search space that is logged into archive. 
- initialize = function(objective, search_space = NULL, terminator, keep_evals = "all", check_values = TRUE, callbacks = list()) { + initialize = function( + objective, + search_space = NULL, + terminator, + check_values = TRUE, + callbacks = NULL, + archive = NULL + ) { self$objective = assert_r6(objective, "Objective") + self$objective$callbacks = assert_callbacks(as_callbacks(callbacks)) + self$search_space = assert_param_set(search_space) self$terminator = assert_terminator(terminator, self) - assert_choice(keep_evals, c("all", "best")) assert_flag(check_values) - self$callbacks = assert_callbacks(as_callbacks(callbacks)) - - # set search space - domain_search_space = self$objective$domain$search_space() - self$search_space = if (is.null(search_space) && domain_search_space$length == 0) { - # use whole domain as search space - self$objective$domain - } else if (is.null(search_space) && domain_search_space$length > 0) { - # create search space from tune token in domain - domain_search_space - } else if (!is.null(search_space) && domain_search_space$length == 0) { - # use supplied search space - assert_param_set(search_space) - } else { - stop("If the domain contains TuneTokens, you cannot supply a search_space.") - } - - # use minimal archive if only best points are needed - self$archive = if (keep_evals == "all") { - Archive$new(search_space = self$search_space, codomain = objective$codomain, check_values = check_values) - } else if (keep_evals == "best") { - ArchiveBest$new(search_space = self$search_space, codomain = objective$codomain, check_values = check_values) - } - - # disable objective function if search space is not all numeric - if (!self$search_space$all_numeric) { - private$.objective_function = objective_error - } else { - private$.objective_function = objective_function - } - self$objective_multiplicator = self$objective$codomain$maximization_to_minimization + self$archive = assert_r6(archive, "Archive") }, #' @description @@ -118,52 +89,8 @@ OptimInstance = R6Class("OptimInstance", }, #' @description - #' Evaluates all input values in `xdt` by calling - #' the [Objective]. Applies possible transformations to the input values - #' and writes the results to the [Archive]. - #' - #' Before each batch-evaluation, the [Terminator] is checked, and if it - #' is positive, an exception of class `terminated_error` is raised. This - #' function should be internally called by the [Optimizer]. - #' @param xdt (`data.table::data.table()`)\cr - #' x values as `data.table()` with one point per row. Contains the value in - #' the *search space* of the [OptimInstance] object. Can contain additional - #' columns for extra information. 
- eval_batch = function(xdt) { - private$.xdt = xdt - call_back("on_optimizer_before_eval", self$callbacks, private$.context) - # update progressor - if (!is.null(self$progressor)) self$progressor$update(self$terminator, self$archive) - - if (self$is_terminated) stop(terminated_error(self)) - assert_data_table(xdt) - assert_names(colnames(xdt), must.include = self$search_space$ids()) - - lg$info("Evaluating %i configuration(s)", max(1, nrow(xdt))) - xss_trafoed = NULL - if (!nrow(xdt)) { - # eval if search space is empty - ydt = self$objective$eval_many(list(list())) - } else if (!self$search_space$has_trafo && !self$search_space$has_deps && inherits(self$objective, "ObjectiveRFunDt")) { - # if search space has no transformation function and dependencies, and the objective takes a data table - # use shortcut to skip conversion between data table and list - ydt = self$objective$eval_dt(private$.xdt[, self$search_space$ids(), with = FALSE]) - } else { - xss_trafoed = transform_xdt_to_xss(private$.xdt, self$search_space) - ydt = self$objective$eval_many(xss_trafoed) - } - - self$archive$add_evals(xdt, xss_trafoed, ydt) - lg$info("Result of batch %i:", self$archive$n_batch) - lg$info(capture.output(print(cbind(xdt, ydt), - class = FALSE, row.names = FALSE, print.keys = FALSE))) - call_back("on_optimizer_after_eval", self$callbacks, private$.context) - return(invisible(ydt[, self$archive$cols_y, with = FALSE])) - }, - - #' @description - #' The [Optimizer] object writes the best found point - #' and estimated performance value here. For internal use. + #' The [Optimizer] object writes the best found point and estimated performance value here. + #' For internal use. #' #' @param xdt (`data.table::data.table()`)\cr #' x values as `data.table::data.table()` with one row. Contains the value in the @@ -175,28 +102,13 @@ OptimInstance = R6Class("OptimInstance", stop("Abstract class") }, - #' @description - #' Evaluates (untransformed) points of only numeric values. Returns a - #' numeric scalar for single-crit or a numeric vector for multi-crit. The - #' return value(s) are negated if the measure is maximized. Internally, - #' `$eval_batch()` is called with a single row. This function serves as a - #' objective function for optimizers of numeric spaces - which should always - #' be minimized. - #' - #' @param x (`numeric()`)\cr - #' Untransformed points. - #' - #' @return Objective value as `numeric(1)`, negated for maximization problems. - objective_function = function(x) { - private$.objective_function(x, self, self$objective_multiplicator) - }, - #' @description #' Reset terminator and clear all evaluation results from archive and results. clear = function() { self$archive$clear() private$.result = NULL self$progressor = NULL + self$objective$context = NULL invisible(self) } ), @@ -214,18 +126,6 @@ OptimInstance = R6Class("OptimInstance", private$.result[, self$search_space$ids(), with = FALSE] }, - #' @field result_x_domain (`list()`)\cr - #' (transformed) x part of the result in the *domain space* of the objective. - result_x_domain = function() { - private$.result$x_domain[[1]] - }, - - #' @field result_y (`numeric()`)\cr - #' Optimal outcome. - result_y = function() { - unlist(private$.result[, self$objective$codomain$ids(), with = FALSE]) - }, - #' @field is_terminated (`logical(1)`). 
is_terminated = function() { self$terminator$is_terminated(self$archive) @@ -233,10 +133,7 @@ OptimInstance = R6Class("OptimInstance", ), private = list( - .xdt = NULL, .result = NULL, - .objective_function = NULL, - .context = NULL, deep_clone = function(name, value) { switch(name, @@ -250,16 +147,20 @@ OptimInstance = R6Class("OptimInstance", ) ) -objective_function = function(x, inst, maximization_to_minimization) { - xs = set_names(as.list(x), inst$search_space$ids()) - inst$search_space$assert(xs) - xdt = as.data.table(xs) - res = inst$eval_batch(xdt) - y = as.numeric(res[, inst$objective$codomain$target_ids, with = FALSE]) - y * maximization_to_minimization -} - -objective_error = function(x, inst, maximization_to_minimization) { - stop("$objective_function can only be called if search_space only - contains numeric values") +# used by OptimInstance and OptimInstanceAsync +choose_search_space = function(objective, search_space) { + # create search space + domain_search_space = objective$domain$search_space() + if (is.null(search_space) && domain_search_space$length == 0) { + # use whole domain as search space + objective$domain + } else if (is.null(search_space) && domain_search_space$length > 0) { + # create search space from tune token in domain + domain_search_space + } else if (!is.null(search_space) && domain_search_space$length == 0) { + # use supplied search space + assert_param_set(search_space) + } else { + stop("If the domain contains TuneTokens, you cannot supply a search_space.") + } } diff --git a/R/OptimInstanceAsync.R b/R/OptimInstanceAsync.R new file mode 100644 index 000000000..df8ca80b4 --- /dev/null +++ b/R/OptimInstanceAsync.R @@ -0,0 +1,90 @@ +#' @title Optimization Instance for Asynchronous Optimization +#' +#' @include OptimInstance.R +#' +#' @description +#' The `OptimInstanceAsync` specifies an optimization problem for an [OptimizerAsync]. +#' The function [oi_async()] creates an [OptimInstanceAsyncSingleCrit] or [OptimInstanceAsyncMultiCrit]. +#' +#' @details +#' `OptimInstanceAsync` is an abstract base class that implements the base functionality each instance must provide. +#' +#' @template param_objective +#' @template param_search_space +#' @template param_terminator +#' @template param_check_values +#' @template param_callbacks +#' @template param_archive +#' @template param_rush +#' +#' @template field_rush +#' +#' @export +OptimInstanceAsync = R6Class("OptimInstanceAsync", + inherit = OptimInstance, + cloneable = FALSE, + public = list( + + rush = NULL, + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function( + objective, + search_space = NULL, + terminator, + check_values = FALSE, + callbacks = NULL, + archive = NULL, + rush = NULL + ) { + assert_r6(objective, "Objective") + search_space = choose_search_space(objective, search_space) + self$rush = assert_rush(rush, null_ok = TRUE) %??% rsh() + + # archive is passed when a downstream packages creates a new archive class + archive = if (is.null(archive)) { + ArchiveAsync$new( + search_space = search_space, + codomain = objective$codomain, + check_values = check_values, + rush = self$rush) + } else { + assert_r6(archive, "ArchiveAsync") + } + + super$initialize( + objective = objective, + search_space = search_space, + terminator = terminator, + callbacks = callbacks, + archive = archive) + }, + + #' @description + #' Printer. + #' + #' @param ... (ignored). + print = function(...) 
{ + super$print() + catf(str_indent("* Workers:", self$rush$n_workers)) + }, + + #' @description + #' Reset terminator and clear all evaluation results from archive and results. + clear = function() { + self$rush$reset() + super$clear() + } + ), + + private = list( + + # initialize context for optimization + .initialize_context = function(optimizer) { + context = ContextAsync$new(inst = self, optimizer = optimizer) + self$objective$context = context + } + ) +) + diff --git a/R/OptimInstanceAsyncMultiCrit.R b/R/OptimInstanceAsyncMultiCrit.R new file mode 100644 index 000000000..8626fe761 --- /dev/null +++ b/R/OptimInstanceAsyncMultiCrit.R @@ -0,0 +1,79 @@ +#' @title Multi Criteria Optimization Instance for Asynchronous Optimization +#' +#' @description +#' The [OptimInstanceAsyncMultiCrit] specifies an optimization problem for an [OptimizerAsync]. +#' The function [oi_async()] creates an [OptimInstanceAsyncMultiCrit]. +#' +#' @template param_objective +#' @template param_search_space +#' @template param_terminator +#' @template param_check_values +#' @template param_callbacks +#' @template param_archive +#' @template param_rush +#' +#' @template param_xdt +#' +#' @export +OptimInstanceAsyncMultiCrit = R6Class("OptimInstanceAsyncMultiCrit", + inherit = OptimInstanceAsync, + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function( + objective, + search_space = NULL, + terminator, + check_values = FALSE, + callbacks = NULL, + archive = NULL, + rush = NULL + ) { + if (objective$codomain$target_length == 1) { + stop("Codomain length must be greater than 1.") + } + super$initialize( + objective = objective, + search_space = search_space, + terminator = terminator, + check_values = check_values, + rush = rush, + callbacks = callbacks, + archive = archive) + }, + + #' @description + #' The [OptimizerAsync] writes the best found points and estimated performance values here (probably the Pareto set / front). + #' For internal use. + #' + #' @param ydt (`numeric(1)`)\cr + #' Optimal outcomes, e.g. the Pareto front. + assign_result = function(xdt, ydt) { + # FIXME: We could have one way that just lets us put a 1xn DT as result directly. + assert_data_table(xdt) + assert_names(names(xdt), must.include = self$search_space$ids()) + assert_data_table(ydt) + assert_names(names(ydt), permutation.of = self$objective$codomain$ids()) + x_domain = transform_xdt_to_xss(xdt, self$search_space) + if (length(x_domain) == 0) x_domain = list(list()) + private$.result = cbind(xdt, x_domain = x_domain, ydt) + call_back("on_result", self$objective$callbacks, self$objective$context) + } + ), + + active = list( + + #' @field result_x_domain (`list()`)\cr + #' (transformed) x part of the result in the *domain space* of the objective. + result_x_domain = function() { + private$.result$x_domain + }, + + #' @field result_y (`numeric(1)`)\cr + #' Optimal outcome. + result_y = function() { + private$.result[, self$objective$codomain$ids(), with = FALSE] + } + ) +) diff --git a/R/OptimInstanceAsyncSingleCrit.R b/R/OptimInstanceAsyncSingleCrit.R new file mode 100644 index 000000000..9d7afa877 --- /dev/null +++ b/R/OptimInstanceAsyncSingleCrit.R @@ -0,0 +1,80 @@ +#' @title Single Criterion Optimization Instance for Asynchronous Optimization +#' +#' @description +#' The `OptimInstanceAsyncSingleCrit` specifies an optimization problem for an [OptimizerAsync]. +#' The function [oi_async()] creates an [OptimInstanceAsyncSingleCrit]. 
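A minimal construction sketch for the single-criterion async instance introduced here, assuming the rush package, `rush_plan()`, a reachable Redis server, and a toy objective; none of this is part of the patch itself.

# hypothetical sketch; requires the rush package and a running Redis server
library(bbotk)
library(paradox)
library(rush)
rush_plan(n_workers = 2)

# toy objective: minimize a paraboloid
objective = ObjectiveRFun$new(
  fun = function(xs) list(y = xs$x1^2 + xs$x2^2),
  domain = ps(x1 = p_dbl(-5, 5), x2 = p_dbl(-5, 5)),
  codomain = ps(y = p_dbl(tags = "minimize")))

instance = OptimInstanceAsyncSingleCrit$new(
  objective = objective,
  terminator = trm("evals", n_evals = 20))

# the rush controller is created implicitly when `rush = NULL`
instance$rush$n_workers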
+#' +#' @template param_objective +#' @template param_search_space +#' @template param_terminator +#' @template param_check_values +#' @template param_callbacks +#' @template param_archive +#' @template param_rush +#' +#' @template param_xdt +#' +#' @export +OptimInstanceAsyncSingleCrit = R6Class("OptimInstanceAsyncSingleCrit", + inherit = OptimInstanceAsync, + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function( + objective, + search_space = NULL, + terminator, + check_values = FALSE, + callbacks = NULL, + archive = NULL, + rush = NULL + ) { + if (objective$codomain$target_length > 1) { + stop("Codomain length must be 1.") + } + + super$initialize( + objective = objective, + search_space = search_space, + terminator = terminator, + check_values = check_values, + callbacks = callbacks, + archive = archive, + rush = rush) + }, + + #' @description + #' The [OptimizerAsync] object writes the best found point and estimated performance value here. + #' For internal use. + #' + #' @param y (`numeric(1)`)\cr + #' Optimal outcome. + assign_result = function(xdt, y) { + # FIXME: We could have one way that just lets us put a 1xn DT as result directly. + assert_data_table(xdt) + assert_names(names(xdt), must.include = self$search_space$ids()) + assert_number(y) + assert_names(names(y), permutation.of = self$objective$codomain$target_ids) + x_domain = unlist(transform_xdt_to_xss(xdt, self$search_space), recursive = FALSE) + if (is.null(x_domain)) x_domain = list() + private$.result = cbind(xdt, x_domain = list(x_domain), t(y)) # t(y) so the name of y stays + call_back("on_result", self$objective$callbacks, self$objective$context) + } + ), + + active = list( + + #' @field result_x_domain (`list()`)\cr + #' (transformed) x part of the result in the *domain space* of the objective. + result_x_domain = function() { + private$.result$x_domain[[1]] + }, + + #' @field result_y (`numeric()`)\cr + #' Optimal outcome. + result_y = function() { + unlist(private$.result[, self$objective$codomain$ids(), with = FALSE]) + } + ) +) diff --git a/R/OptimInstanceBatch.R b/R/OptimInstanceBatch.R new file mode 100644 index 000000000..6ae3801e3 --- /dev/null +++ b/R/OptimInstanceBatch.R @@ -0,0 +1,203 @@ +#' @title Optimization Instance for Batch Optimization +#' +#' @description +#' The `OptimInstanceBatch` specifies an optimization problem for an [OptimizerBatch]. +#' The function [oi()] creates an [OptimInstanceBatchSingleCrit] or [OptimInstanceBatchMultiCrit]. +#' +#' @template param_objective +#' @template param_search_space +#' @template param_terminator +#' @template param_check_values +#' @template param_callbacks +#' @template param_archive +#' +#' @export +OptimInstanceBatch = R6Class("OptimInstanceBatch", + inherit = OptimInstance, + public = list( + + #' @field objective_multiplicator (`integer()`). + objective_multiplicator = NULL, + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class.
+ initialize = function( + objective, + search_space = NULL, + terminator, + check_values = TRUE, + callbacks = NULL, + archive = NULL + ) { + assert_r6(objective, "Objective") + search_space = choose_search_space(objective, search_space) + + # archive is passed when a downstream packages creates a new archive class + archive = if (is.null(archive)) { + ArchiveBatch$new( + search_space = search_space, + codomain = objective$codomain, + check_values = check_values) + } else { + assert_r6(archive, "ArchiveBatch") + } + + super$initialize( + objective = objective, + search_space = search_space, + terminator = terminator, + callbacks = callbacks, + archive = archive + ) + + # disable objective function if search space is not all numeric + private$.objective_function = if (!self$search_space$all_numeric) objective_error else objective_function + self$objective_multiplicator = self$objective$codomain$maximization_to_minimization + }, + + + #' @description + #' Evaluates all input values in `xdt` by calling + #' the [Objective]. Applies possible transformations to the input values + #' and writes the results to the [Archive]. + #' + #' Before each batch-evaluation, the [Terminator] is checked, and if it + #' is positive, an exception of class `terminated_error` is raised. This + #' function should be internally called by the [Optimizer]. + #' @param xdt (`data.table::data.table()`)\cr + #' x values as `data.table()` with one point per row. Contains the value in + #' the *search space* of the [OptimInstance] object. Can contain additional + #' columns for extra information. + eval_batch = function(xdt) { + private$.xdt = xdt + call_back("on_optimizer_before_eval", self$objective$callbacks, self$objective$context) + # update progressor + if (!is.null(self$progressor)) self$progressor$update(self$terminator, self$archive) + + if (self$is_terminated) stop(terminated_error(self)) + assert_data_table(xdt) + assert_names(colnames(xdt), must.include = self$search_space$ids()) + + lg$info("Evaluating %i configuration(s)", max(1, nrow(xdt))) + xss_trafoed = NULL + if (!nrow(xdt)) { + # eval if search space is empty + ydt = self$objective$eval_many(list(list())) + } else if (!self$search_space$has_trafo && !self$search_space$has_deps && inherits(self$objective, "ObjectiveRFunDt")) { + # if search space has no transformation function and dependencies, and the objective takes a data table + # use shortcut to skip conversion between data table and list + ydt = self$objective$eval_dt(private$.xdt[, self$search_space$ids(), with = FALSE]) + } else { + xss_trafoed = transform_xdt_to_xss(private$.xdt, self$search_space) + ydt = self$objective$eval_many(xss_trafoed) + } + + self$archive$add_evals(xdt, xss_trafoed, ydt) + lg$info("Result of batch %i:", self$archive$n_batch) + lg$info(capture.output(print(cbind(xdt, ydt), + class = FALSE, row.names = FALSE, print.keys = FALSE))) + call_back("on_optimizer_after_eval", self$objective$callbacks, self$objective$context) + return(invisible(ydt[, self$archive$cols_y, with = FALSE])) + }, + + #' @description + #' Evaluates (untransformed) points of only numeric values. Returns a + #' numeric scalar for single-crit or a numeric vector for multi-crit. The + #' return value(s) are negated if the measure is maximized. Internally, + #' `$eval_batch()` is called with a single row. This function serves as a + #' objective function for optimizers of numeric spaces - which should always + #' be minimized. + #' + #' @param x (`numeric()`)\cr + #' Untransformed points. 
+ #' + #' @return Objective value as `numeric(1)`, negated for maximization problems. + objective_function = function(x) { + private$.objective_function(x, self, self$objective_multiplicator) + } + ), + + active = list( + #' @field result ([data.table::data.table])\cr + #' Get result + result = function() { + private$.result + }, + + #' @field result_x_search_space ([data.table::data.table])\cr + #' x part of the result in the *search space*. + result_x_search_space = function() { + private$.result[, self$search_space$ids(), with = FALSE] + }, + + #' @field result_x_domain (`list()`)\cr + #' (transformed) x part of the result in the *domain space* of the objective. + result_x_domain = function() { + private$.result$x_domain[[1]] + }, + + #' @field result_y (`numeric()`)\cr + #' Optimal outcome. + result_y = function() { + unlist(private$.result[, self$objective$codomain$ids(), with = FALSE]) + }, + + #' @field is_terminated (`logical(1)`). + is_terminated = function() { + self$terminator$is_terminated(self$archive) + } + ), + + private = list( + .xdt = NULL, + .objective_function = NULL, + + # initialize context for optimization + .initialize_context = function(optimizer) { + context = ContextBatch$new(inst = self, optimizer = optimizer) + self$objective$context = context + }, + + deep_clone = function(name, value) { + switch(name, + objective = value$clone(deep = TRUE), + search_space = value$clone(deep = TRUE), + terminator = value$clone(deep = TRUE), + archive = value$clone(deep = TRUE), + value + ) + } + ) +) + +objective_function = function(x, inst, maximization_to_minimization) { + xs = set_names(as.list(x), inst$search_space$ids()) + inst$search_space$assert(xs) + xdt = as.data.table(xs) + res = inst$eval_batch(xdt) + y = as.numeric(res[, inst$objective$codomain$target_ids, with = FALSE]) + y * maximization_to_minimization +} + +objective_error = function(x, inst, maximization_to_minimization) { + stop("$objective_function can only be called if search_space only + contains numeric values") +} + +# used by OptimInstance and OptimInstanceAsync +choose_search_space = function(objective, search_space) { + # create search space + domain_search_space = objective$domain$search_space() + if (is.null(search_space) && domain_search_space$length == 0) { + # use whole domain as search space + objective$domain + } else if (is.null(search_space) && domain_search_space$length > 0) { + # create search space from tune token in domain + domain_search_space + } else if (!is.null(search_space) && domain_search_space$length == 0) { + # use supplied search space + assert_param_set(search_space) + } else { + stop("If the domain contains TuneTokens, you cannot supply a search_space.") + } +} diff --git a/R/OptimInstanceBatchMultiCrit.R b/R/OptimInstanceBatchMultiCrit.R new file mode 100644 index 000000000..d50e2ffc4 --- /dev/null +++ b/R/OptimInstanceBatchMultiCrit.R @@ -0,0 +1,71 @@ +#' @title Multi Criteria Optimization Instance for Batch Optimization +#' +#' @description +#' The [OptimInstanceBatchMultiCrit] specifies an optimization problem for an [OptimizerBatch]. +#' The function [oi()] creates an [OptimInstanceBatchMultiCrit]. 
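To make the `$eval_batch()` path shown above concrete, here is a small self-contained sketch with a toy objective (not part of the patch): the batch is transformed, evaluated by the objective, and appended to the archive.

# hypothetical sketch of a manual batch evaluation
library(bbotk)
library(paradox)
library(data.table)

objective = ObjectiveRFun$new(
  fun = function(xs) list(y = xs$x1^2 + xs$x2^2),
  domain = ps(x1 = p_dbl(-5, 5), x2 = p_dbl(-5, 5)),
  codomain = ps(y = p_dbl(tags = "minimize")))

instance = OptimInstanceBatchSingleCrit$new(
  objective = objective,
  terminator = trm("evals", n_evals = 10))

# evaluate two configurations in one batch; results land in instance$archive
instance$eval_batch(data.table(x1 = c(0, 1), x2 = c(0, -1)))
as.data.table(instance$archive)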
+#' +#' @template param_objective +#' @template param_search_space +#' @template param_terminator +#' @template param_check_values +#' @template param_callbacks +#' @template param_archive +#' +#' @template param_xdt +#' @template param_ydt +#' +#' @export +OptimInstanceBatchMultiCrit = R6Class("OptimInstanceBatchMultiCrit", + inherit = OptimInstanceBatch, + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function( + objective, + search_space = NULL, + terminator, + check_values = TRUE, + callbacks = NULL, + archive = NULL + ) { + super$initialize( + objective = objective, + search_space = search_space, + terminator = terminator, + check_values = check_values, + callbacks = callbacks, + archive = archive) + }, + + #' @description + #' The [Optimizer] object writes the best found points + #' and estimated performance values here (probably the Pareto set / front). + #' For internal use. + assign_result = function(xdt, ydt) { + # FIXME: We could have one way that just lets us put a 1xn DT as result directly. + assert_data_table(xdt) + assert_names(names(xdt), must.include = self$search_space$ids()) + assert_data_table(ydt) + assert_names(names(ydt), permutation.of = self$objective$codomain$ids()) + x_domain = transform_xdt_to_xss(xdt, self$search_space) + if (length(x_domain) == 0) x_domain = list(list()) + private$.result = cbind(xdt, x_domain = x_domain, ydt) + call_back("on_result", self$objective$callbacks, self$objective$context) + } + ), + + active = list( + #' @field result_x_domain (`list()`)\cr + #' (transformed) x part of the result in the *domain space* of the objective. + result_x_domain = function() { + private$.result$x_domain + }, + + #' @field result_y (`numeric(1)`)\cr + #' Optimal outcome. + result_y = function() { + private$.result[, self$objective$codomain$ids(), with = FALSE] + } + ) +) diff --git a/R/OptimInstanceBatchSingleCrit.R b/R/OptimInstanceBatchSingleCrit.R new file mode 100644 index 000000000..f93fe2652 --- /dev/null +++ b/R/OptimInstanceBatchSingleCrit.R @@ -0,0 +1,61 @@ +#' @title Single Criterion Optimization Instance for Batch Optimization +#' +#' @description +#' The [OptimInstanceBatchSingleCrit] specifies an optimization problem for an [OptimizerBatch]. +#' The function [oi()] creates an [OptimInstanceBatchSingleCrit]. +#' +#' @template param_objective +#' @template param_search_space +#' @template param_terminator +#' @template param_check_values +#' @template param_callbacks +#' @template param_archive +#' +#' @template param_xdt +#' +#' @export +OptimInstanceBatchSingleCrit = R6Class("OptimInstanceBatchSingleCrit", + inherit = OptimInstanceBatch, + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function( + objective, + search_space = NULL, + terminator, + check_values = TRUE, + callbacks = NULL, + archive = NULL) { + + if (objective$codomain$target_length > 1) { + stop("Codomain > 1") + } + super$initialize( + objective = objective, + search_space = search_space, + terminator = terminator, + check_values = check_values, + callbacks = callbacks, + archive = archive) + }, + + #' @description + #' The [Optimizer] object writes the best found point + #' and estimated performance value here. For internal use. + #' + #' @param y (`numeric(1)`)\cr + #' Optimal outcome. + assign_result = function(xdt, y) { + # FIXME: We could have one way that just lets us put a 1xn DT as result directly. 
+ assert_data_table(xdt) + assert_names(names(xdt), must.include = self$search_space$ids()) + assert_number(y) + assert_names(names(y), permutation.of = self$objective$codomain$target_ids) + x_domain = unlist(transform_xdt_to_xss(xdt, self$search_space), recursive = FALSE) + if (is.null(x_domain)) x_domain = list() + private$.result = cbind(xdt, x_domain = list(x_domain), t(y)) # t(y) so the name of y stays + call_back("on_result", self$objective$callbacks, self$objective$context) + } + ) +) diff --git a/R/OptimInstanceMultiCrit.R b/R/OptimInstanceMultiCrit.R deleted file mode 100644 index f59ab4f37..000000000 --- a/R/OptimInstanceMultiCrit.R +++ /dev/null @@ -1,70 +0,0 @@ -#' @title Optimization Instance with budget and archive -#' -#' @description -#' Wraps a multi-criteria [Objective] function with extra services for -#' convenient evaluation. Inherits from [OptimInstance]. -#' -#' * Automatic storing of results in an [Archive] after evaluation. -#' * Automatic checking for termination. Evaluations of design points are -#' performed in batches. Before a batch is evaluated, the [Terminator] is -#' queried for the remaining budget. If the available budget is exhausted, an -#' exception is raised, and no further evaluations can be performed from this -#' point on. -#' -#' @template param_xdt -#' @template param_ydt -#' @template param_search_space -#' @template param_keep_evals -#' @template param_callbacks -#' @export -OptimInstanceMultiCrit = R6Class("OptimInstanceMultiCrit", - inherit = OptimInstance, - public = list( - - #' @description - #' Creates a new instance of this [R6][R6::R6Class] class. - #' - #' @param objective ([Objective]). - #' @param terminator ([Terminator])\cr - #' Multi-criteria terminator. - #' @param check_values (`logical(1)`)\cr - #' Should x-values that are added to the archive be checked for validity? - #' Search space that is logged into archive. - initialize = function(objective, search_space = NULL, terminator, keep_evals = "all", check_values = TRUE, callbacks = list()) { - super$initialize(objective, search_space, terminator, keep_evals, check_values, callbacks) - }, - - #' @description - #' The [Optimizer] object writes the best found points - #' and estimated performance values here (probably the Pareto set / front). - #' For internal use. - #' - #' @param ydt (`numeric(1)`)\cr - #' Optimal outcomes, e.g. the Pareto front. - assign_result = function(xdt, ydt) { - # FIXME: We could have one way that just lets us put a 1xn DT as result directly. - assert_data_table(xdt) - assert_names(names(xdt), must.include = self$search_space$ids()) - assert_data_table(ydt) - assert_names(names(ydt), permutation.of = self$objective$codomain$ids()) - x_domain = transform_xdt_to_xss(xdt, self$search_space) - if (length(x_domain) == 0) x_domain = list(list()) - private$.result = cbind(xdt, x_domain = x_domain, ydt) - call_back("on_result", self$callbacks, private$.context) - } - ), - - active = list( - #' @field result_x_domain (`list()`)\cr - #' (transformed) x part of the result in the *domain space* of the objective. - result_x_domain = function() { - private$.result$x_domain - }, - - #' @field result_y (`numeric(1)`)\cr - #' Optimal outcome. 
- result_y = function() { - private$.result[, self$objective$codomain$ids(), with = FALSE] - } - ) -) diff --git a/R/OptimInstanceSingleCrit.R b/R/OptimInstanceSingleCrit.R deleted file mode 100644 index 1e40b9ca1..000000000 --- a/R/OptimInstanceSingleCrit.R +++ /dev/null @@ -1,56 +0,0 @@ -#' @title Optimization Instance with budget and archive -#' -#' @description -#' Wraps a single-criteria [Objective] function with extra services for -#' convenient evaluation. Inherits from [OptimInstance]. -#' -#' * Automatic storing of results in an [Archive] after evaluation. -#' * Automatic checking for termination. Evaluations of design points are -#' performed in batches. Before a batch is evaluated, the [Terminator] is -#' queried for the remaining budget. If the available budget is exhausted, an -#' exception is raised, and no further evaluations can be performed from this -#' point on. -#' -#' @template param_xdt -#' @template param_search_space -#' @template param_keep_evals -#' @template param_callbacks -#' @export -OptimInstanceSingleCrit = R6Class("OptimInstanceSingleCrit", - inherit = OptimInstance, - public = list( - - #' @description - #' Creates a new instance of this [R6][R6::R6Class] class. - #' - #' @param objective ([Objective]). - #' @param terminator ([Terminator]). - #' @param check_values (`logical(1)`)\cr - #' Should x-values that are added to the archive be checked for validity? - #' Search space that is logged into archive. - initialize = function(objective, search_space = NULL, terminator, keep_evals = "all", check_values = TRUE, callbacks = list()) { - if (objective$codomain$target_length > 1) { - stop("Codomain > 1") - } - super$initialize(objective, search_space, terminator, keep_evals, check_values, callbacks) - }, - - #' @description - #' The [Optimizer] object writes the best found point - #' and estimated performance value here. For internal use. - #' - #' @param y (`numeric(1)`)\cr - #' Optimal outcome. - assign_result = function(xdt, y) { - # FIXME: We could have one way that just lets us put a 1xn DT as result directly. - assert_data_table(xdt) - assert_names(names(xdt), must.include = self$search_space$ids()) - assert_number(y) - assert_names(names(y), permutation.of = self$objective$codomain$target_ids) - x_domain = unlist(transform_xdt_to_xss(xdt, self$search_space), recursive = FALSE) - if (is.null(x_domain)) x_domain = list() - private$.result = cbind(xdt, x_domain = list(x_domain), t(y)) # t(y) so the name of y stays - call_back("on_result", self$callbacks, private$.context) - } - ) -) diff --git a/R/Optimizer.R b/R/Optimizer.R index 71d5cccb1..5a35a8987 100644 --- a/R/Optimizer.R +++ b/R/Optimizer.R @@ -3,7 +3,10 @@ #' @include mlr_optimizers.R #' #' @description -#' Abstract `Optimizer` class that implements the base functionality each `Optimizer` subclass must provide. +#' The `Optimizer` implements the optimization algorithm. +#' +#' @details +#' `Optimizer` is an abstract base class that implements the base functionality each optimizer must provide. #' A `Optimizer` object describes the optimization strategy. #' A `Optimizer` object must write its result to the `$assign_result()` method of the [OptimInstance] at the end in order to store the best point and its estimated performance vector. #' @@ -21,6 +24,7 @@ #' @export Optimizer = R6Class("Optimizer", public = list( + #' @template field_id id = NULL, @@ -37,7 +41,15 @@ Optimizer = R6Class("Optimizer", #' @param packages (`character()`)\cr #' Set of required packages. 
#' A warning is signaled by the constructor if at least one of the packages is not installed, but loaded (not attached) later on-demand via [requireNamespace()]. - initialize = function(id = "optimizer", param_set, param_classes, properties, packages = character(), label = NA_character_, man = NA_character_) { + initialize = function( + id = "optimizer", + param_set, + param_classes, + properties, + packages = character(), + label = NA_character_, + man = NA_character_ + ) { self$id = assert_string(id, min.chars = 1L) private$.param_set = assert_param_set(param_set) private$.param_classes = assert_subset(param_classes, c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct", "ParamUty")) @@ -73,22 +85,6 @@ Optimizer = R6Class("Optimizer", #' Opens the corresponding help page referenced by field `$man`. help = function() { open_help(self$man) - }, - - #' @description - #' Performs the optimization and writes optimization result into - #' [OptimInstance]. The optimization result is returned but the complete - #' optimization path is stored in [Archive] of [OptimInstance]. - #' - #' @param inst ([OptimInstance]). - #' @return [data.table::data.table]. - optimize = function(inst) { - inst$archive$start_time = Sys.time() - inst$.__enclos_env__$private$.context = ContextOptimization$new(instance = inst, optimizer = self) - call_back("on_optimization_begin", inst$callbacks, get_private(inst)$.context) - result = optimize_default(inst, self, private) - call_back("on_optimization_end", inst$callbacks, get_private(inst)$.context) - result } ), @@ -149,7 +145,6 @@ Optimizer = R6Class("Optimizer", .optimize = function(inst) stop("abstract"), .assign_result = function(inst) { - assert_r6(inst, "OptimInstance") assign_result_default(inst) }, @@ -161,3 +156,31 @@ Optimizer = R6Class("Optimizer", .man = NULL ) ) + +#' @title Default Assign Result Function +#' +#' @description +#' Used internally in the [Optimizer]. +#' It is the default way to determine the result by simply obtaining the best performing result from the archive. +#' +#' @param inst [OptimInstance] +#' +#' @keywords internal +#' @export +assign_result_default = function(inst) { + assert_r6(inst, "OptimInstance") + res = inst$archive$best() + + xdt = res[, inst$search_space$ids(), with = FALSE] + + if (inherits(inst, "OptimInstanceBatchMultiCrit") || inherits(inst, "OptimInstanceAsyncMultiCrit")) { + ydt = res[, inst$archive$cols_y, with = FALSE] + inst$assign_result(xdt, ydt) + } else { + # unlist keeps name! + y = unlist(res[, inst$archive$cols_y, with = FALSE]) + inst$assign_result(xdt, y) + } + + invisible(NULL) +} diff --git a/R/OptimizerAsync.R b/R/OptimizerAsync.R new file mode 100644 index 000000000..33f35329a --- /dev/null +++ b/R/OptimizerAsync.R @@ -0,0 +1,141 @@ +#' @title Asynchronous Optimizer +#' +#' @include mlr_optimizers.R +#' +#' @description +#' The [OptimizerAsync] implements the asynchronous optimization algorithm. +#' The optimization is performed asynchronously on a set of workers. +#' +#' @details +#' [OptimizerAsync] is the abstract base class for all asynchronous optimizers. +#' It provides the basic structure for asynchronous optimization algorithms. +#' The public method `$optimize()` is the main entry point for the optimization and runs in the main process. +#' The method starts the optimization process by starting the workers and pushing the necessary objects to the workers. +#' Optionally, a set of points can be created, e.g. an initial design, and pushed to the workers. 
+#' The private method `$.optimize()` is the actual optimization algorithm that runs on the workers. +#' Usually, the method proposes new points, evaluates them, and updates the archive. +#' +#' @export +OptimizerAsync = R6Class("OptimizerAsync", + inherit = Optimizer, + public = list( + + #' @description + #' Performs the optimization on a [OptimInstanceAsyncSingleCrit] or [OptimInstanceAsyncMultiCrit] until termination. + #' The single evaluations will be written into the [ArchiveAsync]. + #' The result will be written into the instance object. + #' + #' @param inst ([OptimInstanceAsyncSingleCrit] | [OptimInstanceAsyncMultiCrit]). + #' + #' @return [data.table::data.table()] + optimize = function(inst) { + optimize_async_default(inst, self) + } + ) +) + +#' @title Default Asynchronous Optimization +#' +#' @description +#' Used internally in [OptimizerAsync]. +#' +#' @param instance [OptimInstanceAsync]. +#' @param optimizer [OptimizerAsync]. +#' +#' @keywords internal +#' @export +optimize_async_default = function(instance, optimizer, design = NULL) { + assert_class(instance, "OptimInstanceAsync") + assert_class(optimizer, "OptimizerAsync") + assert_data_table(design, null.ok = TRUE) + + instance$archive$start_time = Sys.time() + get_private(instance)$.initialize_context(optimizer) + call_back("on_optimization_begin", instance$objective$callbacks, instance$objective$context) + + # send design to workers + if (!is.null(design)) instance$archive$push_points(transpose_list(design)) + + if (getOption("bbotk_local", FALSE)) { + # debug mode runs .optimize() in main process + rush = RushWorker$new(instance$rush$network_id, host = "local") + instance$rush = rush + instance$archive$rush = rush + get_private(optimizer)$.optimize(instance) + } else { + # run .optimize() on workers + + # check if there are already running workers or a rush plan is available + if (!instance$rush$n_running_workers && !rush_available()) { + stop("No running worker found and no rush plan available to start local workers.\n See `?rush::rush_plan()`") + } + + # FIXME: How to pass globals and packages? 
+ if (!instance$rush$n_running_workers) { + lg$debug("Start %i local worker(s)", rush_config()$n_workers) + + packages = c(optimizer$packages, "bbotk") # add packages from objective + + instance$rush$start_workers( + worker_loop = bbotk_worker_loop, + packages = packages, + optimizer = optimizer, + instance = instance, + wait_for_workers = TRUE) + } + + lg$info("Starting to optimize %i parameter(s) with '%s' and '%s' on %i worker(s)", + instance$search_space$length, + optimizer$format(), + instance$terminator$format(with_params = TRUE), + instance$rush$n_running_workers + ) + } + + # wait until optimization is finished + # check terminated workers when the terminator is "none" + while(!instance$is_terminated && !instance$rush$all_workers_terminated) { + Sys.sleep(1) + instance$rush$print_log() + + # fetch new results for printing + new_results = instance$rush$fetch_new_tasks() + if (nrow(new_results)) { + lg$info("Results of %i configuration(s):", nrow(new_results)) + lg$info(capture.output(print(new_results, class = FALSE, row.names = FALSE, print.keys = FALSE))) + } + + if (instance$rush$all_workers_lost) { + stop("All workers have crashed.") + } + } + + # assign result + get_private(optimizer)$.assign_result(instance) + lg$info("Finished optimizing after %i evaluation(s)", instance$archive$n_evals) + lg$info("Result:") + lg$info(capture.output(print(instance$result, class = FALSE, row.names = FALSE, print.keys = FALSE))) + + call_back("on_optimization_end", instance$objective$callbacks, instance$objective$context) + return(instance$result) +} + +#' @title Default Evaluation of the Queue +#' +#' @description +#' Used internally in `$.optimize()` of [OptimizerAsync] classes to evaluate a queue of points e.g. in [OptimizerAsyncGridSearch]. +#' +#' @param instance [OptimInstanceAsync]. +#' +#' @keywords internal +#' @export +evaluate_queue_default = function(instance) { + while (!instance$is_terminated && instance$archive$n_queued) { + task = instance$archive$pop_point() # FIXME: Add fields argument? + if (!is.null(task)) { + xs_trafoed = trafo_xs(task$xs, instance$search_space) + ys = instance$objective$eval(xs_trafoed) + instance$archive$push_result(task$key, ys, x_domain = xs_trafoed) + } + } +} diff --git a/R/OptimizerAsyncDesignPoints.R b/R/OptimizerAsyncDesignPoints.R new file mode 100644 index 000000000..6a56a5248 --- /dev/null +++ b/R/OptimizerAsyncDesignPoints.R @@ -0,0 +1,66 @@ +#' @title Asynchronous Optimization via Design Points +#' +#' @include OptimizerAsync.R +#' @name mlr_optimizers_async_design_points +#' +#' @description +#' `OptimizerAsyncDesignPoints` class that implements optimization w.r.t. fixed design points. +#' We simply search over a set of points fully specified by the user. +#' +#' @templateVar id async_design_points +#' @template section_dictionary_optimizers +#' +#' @section Parameters: +#' \describe{ +#' \item{`design`}{[data.table::data.table]\cr +#' Design points to try in search, one per row.} +#' } +#' +#' @export +OptimizerAsyncDesignPoints = R6Class("OptimizerAsyncDesignPoints", + inherit = OptimizerAsync, + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class.
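An end-to-end sketch of the worker-startup logic in `optimize_async_default()` above; this is hypothetical and assumes the rush package, `rush_plan()`, and a running Redis server, with the optimizer id referring to the async random search registered later in this patch.

# hypothetical sketch; workers are started by optimize_async_default()
library(bbotk)
library(paradox)
library(rush)
rush_plan(n_workers = 2)

objective = ObjectiveRFun$new(
  fun = function(xs) list(y = xs$x1^2 + xs$x2^2),
  domain = ps(x1 = p_dbl(-5, 5), x2 = p_dbl(-5, 5)),
  codomain = ps(y = p_dbl(tags = "minimize")))

instance = OptimInstanceAsyncSingleCrit$new(
  objective = objective,
  terminator = trm("evals", n_evals = 50))

# points are proposed and evaluated on the workers until termination
optimizer = opt("async_random_search")
optimizer$optimize(instance)
instance$result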
+ initialize = function() { + param_set = ps( + design = p_uty(tags = "required", custom_check = function(x) check_data_table(x, min.rows = 1, min.cols = 1, null.ok = TRUE)) + ) + param_set$values = list(design = NULL) + super$initialize( + id = "design_points", + param_set = param_set, + param_classes = c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct", "ParamUty"), + properties = c("dependencies", "single-crit", "multi-crit"), + label = "Design Points", + man = "bbotk::mlr_optimizers_async_design_points" + ) + }, + + #' @description + #' Starts the asynchronous optimization. + #' + #' @param inst ([OptimInstance]). + #' @return [data.table::data.table]. + optimize = function(inst) { + + # generate grid and send to workers + design = inst$search_space$assert_dt(self$param_set$values$design) + + optimize_async_default(inst, self, design) + } + ), + + private = list( + .optimize = function(inst) { + archive = inst$archive + + # evaluate design of points + evaluate_queue_default(inst) + } + ) +) + +mlr_optimizers$add("async_design_points", OptimizerAsyncDesignPoints) + diff --git a/R/OptimizerAsyncGridSearch.R b/R/OptimizerAsyncGridSearch.R new file mode 100644 index 000000000..912e20fa1 --- /dev/null +++ b/R/OptimizerAsyncGridSearch.R @@ -0,0 +1,75 @@ +#' @title Asynchronous Optimization via Grid Search +#' +#' @include OptimizerAsync.R +#' @name mlr_optimizers_async_grid_search +#' +#' @description +#' `OptimizerAsyncGridSearch` class that implements a grid search. +#' The grid is constructed as a Cartesian product over discretized values per parameter, see [paradox::generate_design_grid()]. +#' The points of the grid are evaluated in a random order. +#' +#' @templateVar id async_grid_search +#' @template section_dictionary_optimizers +#' +#' @section Parameters: +#' \describe{ +#' \item{`batch_size`}{`integer(1)`\cr +#' Maximum number of points to try in a batch.} +#' } +#' +#' +#' @source +#' `r format_bib("bergstra_2012")` +#' +#' @export +OptimizerAsyncGridSearch = R6Class("OptimizerAsyncGridSearch", + inherit = OptimizerAsync, + + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function() { + param_set = ps( + resolution = p_int(lower = 1L), + param_resolutions = p_uty() + ) + param_set$values = list(resolution = 10L) + + super$initialize( + id = "async_grid_search", + param_set = param_set, + param_classes = c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct"), + properties = c("dependencies", "single-crit", "multi-crit"), + label = "Asynchronous Grid Search", + man = "bbotk::mlr_optimizers_async_grid_search" + ) + }, + + #' @description + #' Starts the asynchronous optimization. + #' + #' @param inst ([OptimInstance]). + #' @return [data.table::data.table]. 
+ optimize = function(inst) { + + # generate grid + pv = self$param_set$values + design = generate_design_grid(inst$search_space, resolution = pv$resolution, param_resolutions = pv$param_resolutions)$data + + optimize_async_default(inst, self, design) + } + ), + + private = list( + .optimize = function(inst) { + archive = inst$archive + + # evaluate grid points + evaluate_queue_default(inst) + } + ) +) + +mlr_optimizers$add("async_grid_search", OptimizerAsyncGridSearch) + diff --git a/R/OptimizerAsyncRandomSearch.R b/R/OptimizerAsyncRandomSearch.R new file mode 100644 index 000000000..a6e698a72 --- /dev/null +++ b/R/OptimizerAsyncRandomSearch.R @@ -0,0 +1,63 @@ +#' @title Asynchronous Optimization via Random Search +#' +#' @include OptimizerAsync.R +#' @name mlr_optimizers_async_random_search +#' +#' @description +#' `OptimizerAsyncRandomSearch` class that implements a simple Random Search. +#' +#' @templateVar id async_random_search +#' @template section_dictionary_optimizers +#' +#' @source +#' `r format_bib("bergstra_2012")` +#' +#' @export +OptimizerAsyncRandomSearch = R6Class("OptimizerAsyncRandomSearch", + inherit = OptimizerAsync, + + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function() { + super$initialize( + id = "async_random_search", + param_set = ps(), + param_classes = c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct"), + properties = c("dependencies", "single-crit", "multi-crit"), + label = "Asynchronous Random Search", + man = "bbotk::mlr_optimizers_random_search" + ) + } + ), + + private = list( + .optimize = function(inst) { + search_space = inst$search_space + + # usually the queue is empty but callbacks might have added points + evaluate_queue_default(inst) + + while(!inst$is_terminated) { + # sample new points + sampler = SamplerUnif$new(search_space) + xdt = sampler$sample(1)$data + xss = transpose_list(xdt) + xs = xss[[1]][inst$archive$cols_x] + xs_trafoed = trafo_xs(xs, search_space) + key = inst$archive$push_running_point(xs) + + # eval + ys = inst$objective$eval(xs_trafoed) + + # push result + inst$archive$push_result(key, ys = ys, x_domain = xs_trafoed) + } + } + ) +) + +mlr_optimizers$add("async_random_search", OptimizerAsyncRandomSearch) + + diff --git a/R/OptimizerBatch.R b/R/OptimizerBatch.R new file mode 100644 index 000000000..30c7d87d9 --- /dev/null +++ b/R/OptimizerBatch.R @@ -0,0 +1,72 @@ +#' @title Batch Optimizer +#' +#' @include mlr_optimizers.R +#' +#' @description +#' Abstract `OptimizerBatch` class that implements the base functionality each `OptimizerBatch` subclass must provide. +#' A `OptimizerBatch` object describes the optimization strategy. +#' A `OptimizerBatch` object must write its result to the `$assign_result()` method of the [OptimInstance] at the end in order to store the best point and its estimated performance vector. +#' +#' @template section_progress_bars +#' +#' @export +OptimizerBatch = R6Class("OptimizerBatch", + inherit = Optimizer, + + public = list( + + #' @description + #' Performs the optimization and writes optimization result into [OptimInstanceBatch]. + #' The optimization result is returned but the complete optimization path is stored in [ArchiveBatch] of [OptimInstanceBatch]. + #' + #' @param inst ([OptimInstanceBatch]). + #' @return [data.table::data.table]. + optimize = function(inst) { + optimize_batch_default(inst, self) + } + ) +) +#' @title Default Batch Optimization Function +#' +#' @description +#' Used internally in the [OptimizerBatch]. 
+#' +#' @param instance [OptimInstance] +#' @param optimizer [OptimizerBatch] +#' +#' @return [data.table::data.table] +#' +#' @keywords internal +#' @export +optimize_batch_default = function(instance, optimizer) { + assert_instance_properties(optimizer, instance) + + instance$archive$start_time = Sys.time() + get_private(instance)$.initialize_context(optimizer) + call_back("on_optimization_begin", instance$objective$callbacks, instance$objective$context) + + if (isNamespaceLoaded("progressr")) { + # progressor must be initialized here because progressor finishes when exiting a function since version 0.7.0 + max_steps = assert_int(instance$terminator$status(instance$archive)["max_steps"]) + unit = assert_character(instance$terminator$unit) + progressor = progressr::progressor(steps = max_steps) + instance$progressor = Progressor$new(progressor, unit) + instance$progressor$max_steps = max_steps + } + + # start optimization + lg$info("Starting to optimize %i parameter(s) with '%s' and '%s'", + instance$search_space$length, optimizer$format(), instance$terminator$format(with_params = TRUE)) + tryCatch({ + get_private(optimizer)$.optimize(instance) + }, terminated_error = function(cond) {}) + + # assign result + get_private(optimizer)$.assign_result(instance) + lg$info("Finished optimizing after %i evaluation(s)", instance$archive$n_evals) + lg$info("Result:") + lg$info(capture.output(print( + instance$result, lass = FALSE, row.names = FALSE, print.keys = FALSE))) + return(instance$result) +} + diff --git a/R/OptimizerCmaes.R b/R/OptimizerBatchCmaes.R similarity index 92% rename from R/OptimizerCmaes.R rename to R/OptimizerBatchCmaes.R index 73f752b80..e2933372e 100644 --- a/R/OptimizerCmaes.R +++ b/R/OptimizerBatchCmaes.R @@ -4,7 +4,7 @@ #' @name mlr_optimizers_cmaes #' #' @description -#' `OptimizerCmaes` class that implements CMA-ES. Calls [adagio::pureCMAES()] +#' `OptimizerBatchCmaes` class that implements CMA-ES. Calls [adagio::pureCMAES()] #' from package \CRANpkg{adagio}. The algorithm is typically applied to search #' space dimensions between three and fifty. Lower search space dimensions might #' crash. @@ -45,7 +45,7 @@ #' domain = domain, #' codomain = codomain) #' -#' instance = OptimInstanceSingleCrit$new( +#' instance = OptimInstanceBatchSingleCrit$new( #' objective = objective, #' search_space = search_space, #' terminator = trm("evals", n_evals = 10)) @@ -61,8 +61,8 @@ #' # allows access of data.table of full path of all evaluations #' as.data.table(instance$archive$data) #' } -OptimizerCmaes = R6Class("OptimizerCmaes", - inherit = Optimizer, +OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", + inherit = OptimizerBatch, public = list( #' @description @@ -103,4 +103,4 @@ OptimizerCmaes = R6Class("OptimizerCmaes", ) ) -mlr_optimizers$add("cmaes", OptimizerCmaes) +mlr_optimizers$add("cmaes", OptimizerBatchCmaes) diff --git a/R/OptimizerDesignPoints.R b/R/OptimizerBatchDesignPoints.R similarity index 91% rename from R/OptimizerDesignPoints.R rename to R/OptimizerBatchDesignPoints.R index fdba3d63b..fd245aa4d 100644 --- a/R/OptimizerDesignPoints.R +++ b/R/OptimizerBatchDesignPoints.R @@ -4,7 +4,7 @@ #' @name mlr_optimizers_design_points #' #' @description -#' `OptimizerDesignPoints` class that implements optimization w.r.t. fixed +#' `OptimizerBatchDesignPoints` class that implements optimization w.r.t. fixed #' design points. We simply search over a set of points fully specified by the #' user. The points in the design are evaluated in order as given. 
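A short usage sketch for the renamed batch design points optimizer; the objective and design values below are invented, and `design` is the parameter documented for this optimizer.

# hypothetical sketch: evaluate a fixed design in the given order
library(bbotk)
library(paradox)
library(data.table)

objective = ObjectiveRFun$new(
  fun = function(xs) list(y = (xs$x - 1)^2),
  domain = ps(x = p_dbl(-5, 5)),
  codomain = ps(y = p_dbl(tags = "minimize")))

instance = OptimInstanceBatchSingleCrit$new(
  objective = objective,
  terminator = trm("evals", n_evals = 3))

design = data.table(x = c(-1, 0, 1))
optimizer = opt("design_points", design = design)
optimizer$optimize(instance)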
#' @@ -42,7 +42,7 @@ #' domain = domain, #' codomain = codomain) #' -#' instance = OptimInstanceSingleCrit$new( +#' instance = OptimInstanceBatchSingleCrit$new( #' objective = objective, #' search_space = search_space, #' terminator = trm("evals", n_evals = 10)) @@ -59,7 +59,7 @@ #' #' # Allows access of data.table of full path of all evaluations #' as.data.table(instance$archive) -OptimizerDesignPoints = R6Class("OptimizerDesignPoints", inherit = Optimizer, +OptimizerBatchDesignPoints = R6Class("OptimizerBatchDesignPoints", inherit = OptimizerBatch, public = list( #' @description @@ -100,4 +100,4 @@ OptimizerDesignPoints = R6Class("OptimizerDesignPoints", inherit = Optimizer, ) ) -mlr_optimizers$add("design_points", OptimizerDesignPoints) +mlr_optimizers$add("design_points", OptimizerBatchDesignPoints) diff --git a/R/OptimizerFocusSearch.R b/R/OptimizerBatchFocusSearch.R similarity index 97% rename from R/OptimizerFocusSearch.R rename to R/OptimizerBatchFocusSearch.R index 7daeb2a21..829f49db1 100644 --- a/R/OptimizerFocusSearch.R +++ b/R/OptimizerBatchFocusSearch.R @@ -4,7 +4,7 @@ #' @name mlr_optimizers_focus_search #' #' @description -#' `OptimizerFocusSearch` class that implements a Focus Search. +#' `OptimizerBatchFocusSearch` class that implements a Focus Search. #' #' Focus Search starts with evaluating `n_points` drawn uniformly at random. #' For 1 to `maxit` batches, `n_points` are then drawn uniformly at random and @@ -32,8 +32,8 @@ #' #' @export #' @template example -OptimizerFocusSearch = R6Class("OptimizerFocusSearch", - inherit = Optimizer, +OptimizerBatchFocusSearch = R6Class("OptimizerBatchFocusSearch", + inherit = OptimizerBatch, public = list( #' @description @@ -106,7 +106,7 @@ OptimizerFocusSearch = R6Class("OptimizerFocusSearch", ) ) -mlr_optimizers$add("focus_search", OptimizerFocusSearch) +mlr_optimizers$add("focus_search", OptimizerBatchFocusSearch) diff --git a/R/OptimizerGenSA.R b/R/OptimizerBatchGenSA.R similarity index 93% rename from R/OptimizerGenSA.R rename to R/OptimizerBatchGenSA.R index 6d868ebb3..732ed58fa 100644 --- a/R/OptimizerGenSA.R +++ b/R/OptimizerBatchGenSA.R @@ -4,7 +4,7 @@ #' @name mlr_optimizers_gensa #' #' @description -#' `OptimizerGenSA` class that implements generalized simulated annealing. Calls +#' `OptimizerBatchGenSA` class that implements generalized simulated annealing. Calls #' [GenSA::GenSA()] from package \CRANpkg{GenSA}. 
#' #' @templateVar id gensa @@ -50,7 +50,7 @@ #' domain = domain, #' codomain = codomain) #' -#' instance = OptimInstanceSingleCrit$new( +#' instance = OptimInstanceBatchSingleCrit$new( #' objective = objective, #' search_space = search_space, #' terminator = trm("evals", n_evals = 10)) @@ -66,7 +66,7 @@ #' # Allows access of data.table of full path of all evaluations #' as.data.table(instance$archive$data) #' } -OptimizerGenSA = R6Class("OptimizerGenSA", inherit = Optimizer, +OptimizerBatchGenSA = R6Class("OptimizerBatchGenSA", inherit = OptimizerBatch, public = list( #' @description @@ -105,7 +105,7 @@ OptimizerGenSA = R6Class("OptimizerGenSA", inherit = Optimizer, ) ) -mlr_optimizers$add("gensa", OptimizerGenSA) +mlr_optimizers$add("gensa", OptimizerBatchGenSA) # a note on smooth and simple.function # smooth: switching the local search algorithm from using L-BFGS-B (default) to Nelder-Mead approach that works better when the objective function has very few places where numerical derivatives can be computed (highly non-smooth function) diff --git a/R/OptimizerGridSearch.R b/R/OptimizerBatchGridSearch.R similarity index 90% rename from R/OptimizerGridSearch.R rename to R/OptimizerBatchGridSearch.R index 22493c2ad..291b56361 100644 --- a/R/OptimizerGridSearch.R +++ b/R/OptimizerBatchGridSearch.R @@ -4,7 +4,7 @@ #' @name mlr_optimizers_grid_search #' #' @description -#' `OptimizerGridSearch` class that implements grid search. The grid is +#' `OptimizerBatchGridSearch` class that implements grid search. The grid is #' constructed as a Cartesian product over discretized values per parameter, see #' [paradox::generate_design_grid()]. The points of the grid are evaluated in a #' random order. @@ -32,7 +32,7 @@ #' #' @export #' @template example -OptimizerGridSearch = R6Class("OptimizerGridSearch", inherit = Optimizer, +OptimizerBatchGridSearch = R6Class("OptimizerBatchGridSearch", inherit = OptimizerBatch, public = list( #' @description @@ -69,4 +69,4 @@ OptimizerGridSearch = R6Class("OptimizerGridSearch", inherit = Optimizer, ) ) -mlr_optimizers$add("grid_search", OptimizerGridSearch) +mlr_optimizers$add("grid_search", OptimizerBatchGridSearch) diff --git a/R/OptimizerIrace.R b/R/OptimizerBatchIrace.R similarity index 96% rename from R/OptimizerIrace.R rename to R/OptimizerBatchIrace.R index 3ed3783f5..680693739 100644 --- a/R/OptimizerIrace.R +++ b/R/OptimizerBatchIrace.R @@ -4,7 +4,7 @@ #' @name mlr_optimizers_irace #' #' @description -#' `OptimizerIrace` class that implements iterated racing. Calls +#' `OptimizerBatchIrace` class that implements iterated racing. Calls #' [irace::irace()] from package \CRANpkg{irace}. #' #' @section Parameters: @@ -20,7 +20,7 @@ #' For the meaning of all other parameters, see [irace::defaultScenario()]. Note #' that we have removed all control parameters which refer to the termination of #' the algorithm. Use [TerminatorEvals] instead. Other terminators do not work -#' with `OptimizerIrace`. +#' with `OptimizerBatchIrace`. #' #' In contrast to [irace::defaultScenario()], we set `digits = 15`. #' This represents double parameters with a higher precision and avoids rounding errors. @@ -28,7 +28,7 @@ #' @section Target Runner and Instances: #' The irace package uses a `targetRunner` script or R function to evaluate a #' configuration on a particular instance. Usually it is not necessary to -#' specify a `targetRunner` function when using `OptimizerIrace`. A default +#' specify a `targetRunner` function when using `OptimizerBatchIrace`. 
A default #' function is used that forwards several configurations and instances to the #' user defined objective function. As usually, the user defined function has #' a `xs`, `xss` or `xdt` parameter depending on the used [Objective] class. @@ -66,6 +66,7 @@ #' #' @export #' @examples +#' \donttest{ #' library(data.table) #' #' search_space = domain = ps( @@ -90,7 +91,7 @@ #' codomain = codomain, #' constants = ps(instances = p_uty())) #' -#' instance = OptimInstanceSingleCrit$new( +#' instance = OptimInstanceBatchSingleCrit$new( #' objective = objective, #' search_space = search_space, #' terminator = trm("evals", n_evals = 1000)) @@ -109,8 +110,9 @@ #' #' # all evaluations #' as.data.table(instance$archive) -OptimizerIrace = R6Class("OptimizerIrace", - inherit = Optimizer, +#' } +OptimizerBatchIrace = R6Class("OptimizerBatchIrace", + inherit = OptimizerBatch, public = list( #' @description @@ -216,7 +218,7 @@ OptimizerIrace = R6Class("OptimizerIrace", ) ) -mlr_optimizers$add("irace", OptimizerIrace) +mlr_optimizers$add("irace", OptimizerBatchIrace) target_runner_default = function(experiment, exec.target.runner, scenario, target.runner) { # nolint optim_instance = scenario$targetRunnerData$inst @@ -248,7 +250,7 @@ paradox_to_irace = function(param_set, digits) { assertClass(param_set, "ParamSet") # workaround for mlr3tuning 0.15.0 digits = assert_int(digits %??% 15, lower = 0) - if ("ParamUty" %in% param_set$class) stop(" not supported by ") + if ("ParamUty" %in% param_set$class) stop(" not supported by ") # types paradox_types = c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct") diff --git a/R/OptimizerNLoptr.R b/R/OptimizerBatchNLoptr.R similarity index 94% rename from R/OptimizerNLoptr.R rename to R/OptimizerBatchNLoptr.R index 1bc57b08b..8a459e3c8 100644 --- a/R/OptimizerNLoptr.R +++ b/R/OptimizerBatchNLoptr.R @@ -4,7 +4,7 @@ #' @name mlr_optimizers_nloptr #' #' @description -#' `OptimizerNLoptr` class that implements non-linear optimization. Calls +#' `OptimizerBatchNLoptr` class that implements non-linear optimization. Calls #' [nloptr::nloptr()] from package \CRANpkg{nloptr}. #' #' @section Parameters: @@ -56,7 +56,7 @@ #' #' # We use the internal termination criterion xtol_rel #' terminator = trm("none") -#' instance = OptimInstanceSingleCrit$new( +#' instance = OptimInstanceBatchSingleCrit$new( #' objective = objective, #' search_space = search_space, #' terminator = terminator) @@ -75,7 +75,7 @@ #' } #' } #' -OptimizerNLoptr = R6Class("OptimizerNLoptr", inherit = Optimizer, +OptimizerBatchNLoptr = R6Class("OptimizerBatchNLoptr", inherit = OptimizerBatch, public = list( #' @description @@ -133,4 +133,4 @@ OptimizerNLoptr = R6Class("OptimizerNLoptr", inherit = Optimizer, ) ) -mlr_optimizers$add("nloptr", OptimizerNLoptr) +mlr_optimizers$add("nloptr", OptimizerBatchNLoptr) diff --git a/R/OptimizerRandomSearch.R b/R/OptimizerBatchRandomSearch.R similarity index 87% rename from R/OptimizerRandomSearch.R rename to R/OptimizerBatchRandomSearch.R index 820cadaea..23cd1d968 100644 --- a/R/OptimizerRandomSearch.R +++ b/R/OptimizerBatchRandomSearch.R @@ -4,7 +4,7 @@ #' @name mlr_optimizers_random_search #' #' @description -#' `OptimizerRandomSearch` class that implements a simple Random Search. +#' `OptimizerBatchRandomSearch` class that implements a simple Random Search. #' #' In order to support general termination criteria and parallelization, we #' evaluate points in a batch-fashion of size `batch_size`. 
Larger batches mean @@ -27,8 +27,8 @@ #' #' @export #' @template example -OptimizerRandomSearch = R6Class("OptimizerRandomSearch", - inherit = Optimizer, +OptimizerBatchRandomSearch = R6Class("OptimizerBatchRandomSearch", + inherit = OptimizerBatch, public = list( #' @description @@ -62,4 +62,4 @@ OptimizerRandomSearch = R6Class("OptimizerRandomSearch", ) ) -mlr_optimizers$add("random_search", OptimizerRandomSearch) +mlr_optimizers$add("random_search", OptimizerBatchRandomSearch) diff --git a/R/Terminator.R b/R/Terminator.R index 2f1cf0044..c0ff5a48b 100644 --- a/R/Terminator.R +++ b/R/Terminator.R @@ -84,7 +84,7 @@ Terminator = R6Class("Terminator", #' #' @return named `integer(2)`. status = function(archive) { - assert_r6(archive, "Archive") + #assert_r6(archive, "Archive") private$.status(archive) }, diff --git a/R/TerminatorClockTime.R b/R/TerminatorClockTime.R index 96024688d..814617b9d 100644 --- a/R/TerminatorClockTime.R +++ b/R/TerminatorClockTime.R @@ -49,7 +49,7 @@ TerminatorClockTime = R6Class("TerminatorClockTime", #' #' @return `logical(1)`. is_terminated = function(archive) { - assert_r6(archive, "Archive") + assert_multi_class(archive, c("Archive", "ArchiveAsync")) return(Sys.time() >= self$param_set$values$stop_time) } ), diff --git a/R/TerminatorCombo.R b/R/TerminatorCombo.R index 38e71b8ea..684b78764 100644 --- a/R/TerminatorCombo.R +++ b/R/TerminatorCombo.R @@ -65,7 +65,7 @@ TerminatorCombo = R6Class("TerminatorCombo", #' #' @return `logical(1)`. is_terminated = function(archive) { - assert_r6(archive, "Archive") + assert_multi_class(archive, c("Archive", "ArchiveAsync")) g = if (self$param_set$values$any) any else all g(map_lgl(self$terminators, function(t) t$is_terminated(archive))) }, diff --git a/R/TerminatorEvals.R b/R/TerminatorEvals.R index f55478f68..c09cde20d 100644 --- a/R/TerminatorEvals.R +++ b/R/TerminatorEvals.R @@ -69,7 +69,7 @@ TerminatorEvals = R6Class("TerminatorEvals", #' #' @return `logical(1)`. is_terminated = function(archive) { - assert_r6(archive, "Archive") + assert_multi_class(archive, c("Archive", "ArchiveAsync")) pv = self$param_set$values archive$n_evals >= pv$n_evals + pv$k * archive$search_space$length } diff --git a/R/TerminatorNone.R b/R/TerminatorNone.R index 54989f77e..935428ecd 100644 --- a/R/TerminatorNone.R +++ b/R/TerminatorNone.R @@ -4,7 +4,7 @@ #' @include Terminator.R #' #' @description -#' Mainly useful for optimization algorithms where the stopping is inherently controlled by the algorithm itself (e.g. [OptimizerGridSearch]). +#' Mainly useful for optimization algorithms where the stopping is inherently controlled by the algorithm itself (e.g. [OptimizerBatchGridSearch]). #' #' @templateVar id none #' @template section_dictionary_terminator @@ -35,7 +35,7 @@ TerminatorNone = R6Class("TerminatorNone", #' #' @return `logical(1)`. is_terminated = function(archive) { - assert_r6(archive, "Archive") + assert_multi_class(archive, c("Archive", "ArchiveAsync")) return(FALSE) } ) diff --git a/R/TerminatorPerfReached.R b/R/TerminatorPerfReached.R index a6c0b471d..fa1be7154 100644 --- a/R/TerminatorPerfReached.R +++ b/R/TerminatorPerfReached.R @@ -51,7 +51,7 @@ TerminatorPerfReached = R6Class("TerminatorPerfReached", #' #' @return `logical(1)`. 
is_terminated = function(archive) { - assert_r6(archive, "Archive") + assert_multi_class(archive, c("Archive", "ArchiveAsync")) level = self$param_set$values$level ycol = archive$cols_y minimize = "minimize" %in% archive$codomain$tags diff --git a/R/TerminatorRunTime.R b/R/TerminatorRunTime.R index 0226319b3..2cad3aa00 100644 --- a/R/TerminatorRunTime.R +++ b/R/TerminatorRunTime.R @@ -52,7 +52,7 @@ TerminatorRunTime = R6Class("TerminatorRunTime", #' #' @return `logical(1)`. is_terminated = function(archive) { - assert_r6(archive, "Archive") + assert_multi_class(archive, c("Archive", "ArchiveAsync")) if (is.null(archive$start_time)) return(FALSE) d = as.numeric(difftime(Sys.time(), archive$start_time, units = "secs")) return(d >= self$param_set$values$secs) diff --git a/R/TerminatorStagnation.R b/R/TerminatorStagnation.R index adbb3e9f6..67c8b2d12 100644 --- a/R/TerminatorStagnation.R +++ b/R/TerminatorStagnation.R @@ -56,7 +56,7 @@ TerminatorStagnation = R6Class("TerminatorStagnation", #' #' @return `logical(1)`. is_terminated = function(archive) { - assert_r6(archive, "Archive") + assert_multi_class(archive, c("Archive", "ArchiveAsync")) pv = self$param_set$values iters = pv$iters ycol = archive$cols_y diff --git a/R/as_terminator.R b/R/as_terminator.R new file mode 100644 index 000000000..d0fb3487f --- /dev/null +++ b/R/as_terminator.R @@ -0,0 +1,40 @@ +#' @title Convert to a Terminator +#' +#' @description +#' Convert object to a [Terminator] or a list of [Terminator]. +#' +#' @param x (any)\cr +#' Object to convert. +#' @param ... (any)\cr +#' Additional arguments. +#' +#' @export +as_terminator = function(x, ...) { # nolint + UseMethod("as_terminator") +} + +#' @export +#' @param clone (`logical(1)`)\cr +#' If `TRUE`, ensures that the returned object is not the same as the input `x`. +#' @rdname as_terminator +as_terminator.Terminator = function(x, clone = FALSE, ...) { # nolint + if (isTRUE(clone)) x$clone() else x +} + +#' @export +#' @rdname as_terminator +as_terminators = function(x, ...) { # nolint + UseMethod("as_terminators") +} + +#' @export +#' @rdname as_terminator +as_terminators.default = function(x, ...) { # nolint + list(as_terminator(x, ...)) +} + +#' @export +#' @rdname as_terminator +as_terminators.list = function(x, ...) { # nolint + lapply(x, as_terminator, ...) +} diff --git a/R/assertions.R b/R/assertions.R index 0f79ed53e..a845f28f2 100644 --- a/R/assertions.R +++ b/R/assertions.R @@ -23,8 +23,10 @@ NULL #' @export #' @param terminator ([Terminator]). #' @param instance ([OptimInstance]). +#' @template param_null_ok #' @rdname bbotk_assertions -assert_terminator = function(terminator, instance = NULL) { +assert_terminator = function(terminator, instance = NULL, null_ok = FALSE) { + if (null_ok && is.null(terminator)) return(NULL) assert_r6(terminator, "Terminator") if (!is.null(instance)) { @@ -34,12 +36,19 @@ assert_terminator = function(terminator, instance = NULL) { invisible(terminator) } +#' @export +#' @param terminators (list of [Terminator]). +#' @rdname bbotk_assertions +assert_terminators = function(terminators) { + invisible(lapply(terminators, assert_terminator)) +} + #' @export #' @param terminator ([Terminator]). #' @param instance ([OptimInstance]). 
#' @rdname bbotk_assertions assert_terminable = function(terminator, instance) { - if ("OptimInstanceMultiCrit" %in% class(instance)) { + if ("OptimInstanceBatchMultiCrit" %in% class(instance)) { if (!"multi-crit" %in% terminator$properties) { stopf("Terminator '%s' does not support multi-crit optimization", terminator$format()) @@ -63,38 +72,78 @@ assert_set = function(x, empty = TRUE, .var.name = vname(x)) { } #' @export -#' @param optimizer ([Optimizer]) +#' @param optimizer ([OptimizerBatch]). +#' @template param_null_ok #' @rdname bbotk_assertions -assert_optimizer = function(optimizer) { +assert_optimizer = function(optimizer, null_ok = FALSE) { + if (null_ok && is.null(optimizer)) return(NULL) assert_r6(optimizer, "Optimizer") } +#' @export +#' @param optimizer ([OptimizerAsync]) +#' @template param_null_ok +#' @rdname bbotk_assertions +assert_optimizer_async = function(optimizer, null_ok = FALSE) { + if (null_ok && is.null(optimizer)) return(NULL) + assert_r6(optimizer, "OptimizerAsync") +} + +#' @export +#' @param optimizer ([OptimizerAsync]) +#' @template param_null_ok +#' @rdname bbotk_assertions +assert_optimizer_batch = function(optimizer, null_ok = FALSE) { + if (null_ok && is.null(optimizer)) return(NULL) + assert_r6(optimizer, "OptimizerBatch") +} + +#' @export +#' @param inst ([OptimInstance]) +#' @template param_null_ok +#' @rdname bbotk_assertions +assert_instance = function(inst, null_ok = FALSE) { + if (null_ok && is.null(inst)) return(NULL) + assert_r6(inst, "OptimInstance") +} + +#' @param inst ([OptimInstanceBatch]) +#' @template param_null_ok +#' @rdname bbotk_assertions +assert_instance_batch = function(inst, null_ok = FALSE) { + if (null_ok && is.null(inst)) return(NULL) + assert_r6(inst, "OptimInstanceBatch") +} + +#' @export +#' @param inst ([OptimInstanceAsync]) +#' @template param_null_ok +#' @rdname bbotk_assertions +assert_instance_async = function(inst, null_ok = FALSE) { + if (null_ok && is.null(inst)) return(NULL) + assert_r6(inst, "OptimInstanceAsync") +} + #' @export #' @param optimizer ([Optimizer]). #' @param instance ([OptimInstance]). #' @rdname bbotk_assertions assert_instance_properties = function(optimizer, inst) { - assert_r6(inst, "OptimInstance") + assert_class(inst, "OptimInstance") require_namespaces(optimizer$packages) # check multi or single-crit if ("multi-crit" %nin% optimizer$properties && inst$objective$ydim > 1) { - stopf( - "'%s' does not support multi-crit objectives", - optimizer$format()) + stopf("'%s' does not support multi-crit objectives", optimizer$format()) } if ("single-crit" %nin% optimizer$properties && inst$objective$ydim == 1) { - stopf( - "'%s' does not support single-crit objectives", - optimizer$format()) + stopf( "'%s' does not support single-crit objectives", optimizer$format()) } # check dependencies if ("dependencies" %nin% optimizer$properties && inst$search_space$has_deps) { - stopf( - "'%s' does not support param sets with dependencies!", - optimizer$format()) + stopf("'%s' does not support param sets with dependencies!", optimizer$format()) } # check supported parameter class @@ -103,3 +152,30 @@ assert_instance_properties = function(optimizer, inst) { stopf("'%s' does not support param types: '%s'", class(optimizer)[1L], paste0(not_supported_pclasses, collapse = ",")) } } + +#' @export +#' @param archive ([Archive]). 
+#' @template param_null_ok +#' @rdname bbotk_assertions +assert_archive = function(archive, null_ok = FALSE) { + if (null_ok && is.null(archive)) return(NULL) + assert_r6(archive, "Archive") +} + +#' @export +#' @param archive ([ArchiveAsync]). +#' @template param_null_ok +#' @rdname bbotk_assertions +assert_archive_async = function(archive, null_ok = FALSE) { + if (null_ok && is.null(archive)) return(NULL) + assert_r6(archive, "ArchiveAsync") +} + +#' @export +#' @param archive ([ArchiveBatch]). +#' @template param_null_ok +#' @rdname bbotk_assertions +assert_archive_batch = function(archive, null_ok = FALSE) { + if (null_ok && is.null(archive)) return(NULL) + assert_r6(archive, "ArchiveBatch") +} diff --git a/R/bb_optimize.R b/R/bb_optimize.R index e5bd0e53d..705935792 100644 --- a/R/bb_optimize.R +++ b/R/bb_optimize.R @@ -32,7 +32,7 @@ #' @return `list` of #' * `"par"` - Best found parameters #' * `"value"` - Optimal outcome -#' * `"instance"` - [OptimInstanceSingleCrit] | [OptimInstanceMultiCrit] +#' * `"instance"` - [OptimInstanceBatchSingleCrit] | [OptimInstanceBatchMultiCrit] #' #' @export #' @examples @@ -104,7 +104,7 @@ bb_optimize.Objective = function(x, method = "random_search", max_evals = 1000, } else if (!is.null(max_time)) { trm("run_time", secs = max_time) } - optiminstance = if (x$codomain$length == 1) OptimInstanceSingleCrit else OptimInstanceMultiCrit + optiminstance = if (x$codomain$length == 1) OptimInstanceBatchSingleCrit else OptimInstanceBatchMultiCrit instance = optiminstance$new(x, terminator = terminator, search_space = search_space, check_values = FALSE) optimizer$optimize(instance) diff --git a/R/helper.R b/R/helper.R index cc5930a01..b172f54b7 100644 --- a/R/helper.R +++ b/R/helper.R @@ -1,3 +1,12 @@ +#' @title Termination Error +#' +#' @description +#' Error class for termination. +#' +#' @param optim_instance [OptimInstance]\cr +#' OptimInstance that terminated. +#' +#' @export terminated_error = function(optim_instance) { msg = sprintf( fmt = "Objective (obj:%s, term:%s) terminated", @@ -23,6 +32,7 @@ is_dominated = function(ymat) { } #' @title Calculates the transformed x-values +#' #' @description #' Transforms a given `data.table()` to a list with transformed x values. #' If no trafo is defined it will just convert the `data.table()` to a list. @@ -42,70 +52,23 @@ transform_xdt_to_xss = function(xdt, search_space) { design$transpose(trafo = TRUE, filter_na = TRUE) } -#' @title Default optimization function -#' @description -#' Used internally in the [Optimizer]. -#' Brings together the private `.optimize()` method and the private `.assign_result()` method. 
-#' -#' @param inst [OptimInstance] -#' @param self [Optimizer] -#' @param private (`environment()`) -#' -#' @return [data.table::data.table] +#' @title Calculate the transformed x-values #' -#' @keywords internal -#' @export -optimize_default = function(inst, self, private) { - assert_instance_properties(self, inst) - if (isNamespaceLoaded("progressr")) { - # initialize progressor - # progressor must be initialized here because progressor finishes when exiting a function since version 0.7.0 - max_steps = assert_int(inst$terminator$status(inst$archive)["max_steps"]) - unit = assert_character(inst$terminator$unit) - progressor = progressr::progressor(steps = max_steps) - inst$progressor = Progressor$new(progressor, unit) - inst$progressor$max_steps = max_steps - } - - # start optimization - lg$info("Starting to optimize %i parameter(s) with '%s' and '%s'", - inst$search_space$length, self$format(), inst$terminator$format(with_params = TRUE)) - tryCatch({ - private$.optimize(inst) - }, terminated_error = function(cond) { - }) - private$.assign_result(inst) - lg$info("Finished optimizing after %i evaluation(s)", inst$archive$n_evals) - lg$info("Result:") - lg$info(capture.output(print( - inst$result, lass = FALSE, row.names = FALSE, print.keys = FALSE))) - return(inst$result) -} - -#' @title Default assign_result function #' @description -#' Used internally in the [Optimizer]. -#' It is the default way to determine the result by simply obtaining the best performing result from the archive. +#' Transforms a given `list()` to a list with transformed x values. #' -#' @param inst [OptimInstance] +#' @param xs (`list()`) \cr +#' List of x-values. +#' @param search_space [paradox::ParamSet]\cr +#' Search space. #' -#' @keywords internal #' @export -assign_result_default = function(inst) { - res = inst$archive$best() - - xdt = res[, inst$search_space$ids(), with = FALSE] - - if (inherits(inst, "OptimInstanceMultiCrit")) { - ydt = res[, inst$archive$cols_y, with = FALSE] - inst$assign_result(xdt, ydt) - } else { - # unlist keeps name! - y = unlist(res[, inst$archive$cols_y, with = FALSE]) - inst$assign_result(xdt, y) +trafo_xs = function(xs, search_space) { + xs = discard(xs, is_scalar_na) + if (search_space$has_trafo) { + xs = search_space$trafo(xs, search_space) } - - invisible(NULL) + return(xs) } #' @title Get start values for optimizers @@ -167,3 +130,5 @@ allow_partial_matching = list( warnPartialMatchAttr = FALSE, warnPartialMatchDollar = FALSE ) + + diff --git a/R/mlr_callbacks.R b/R/mlr_callbacks.R index b14f8e404..9ec9bb86a 100644 --- a/R/mlr_callbacks.R +++ b/R/mlr_callbacks.R @@ -1,17 +1,17 @@ #' @title Backup Archive Callback #' -#' @include CallbackOptimization.R +#' @include CallbackBatch.R #' @name bbotk.backup #' #' @description -#' This [CallbackOptimization] writes the [Archive] after each batch to disk. +#' This [CallbackBatch] writes the [Archive] after each batch to disk. #' #' @examples #' clbk("bbotk.backup", path = "backup.rds") NULL load_callback_backup = function() { - callback_optimization("bbotk.backup", + callback_batch("bbotk.backup", label = "Backup Archive Callback", man = "bbotk::bbotk.backup", on_optimization_begin = function(callback, context) { diff --git a/R/sugar.R b/R/sugar.R index 5c3a33130..f97eccc40 100644 --- a/R/sugar.R +++ b/R/sugar.R @@ -66,3 +66,68 @@ opt = function(.key, ...) { opts = function(.keys, ...) { dictionary_sugar_mget(mlr_optimizers, .keys, ...) 
} + +#' @title Syntactic Sugar for Optimization Instance Construction +#' +#' @description +#' Function to construct a [OptimInstanceBatchSingleCrit] and [OptimInstanceBatchMultiCrit]. +#' +#' +#' @template param_objective +#' @template param_search_space +#' @template param_terminator +#' @template param_callbacks +#' @template param_check_values +#' @template param_keep_evals +#' +#' @export +oi = function( + objective, + search_space = NULL, + terminator, + callbacks = NULL, + check_values = TRUE, + keep_evals = "all" + ) { + assert_r6(objective, "Objective") + + Instance = if (objective$codomain$target_length == 1) OptimInstanceBatchSingleCrit else OptimInstanceBatchMultiCrit + Instance$new( + objective = objective, + search_space = search_space, + terminator = terminator, + callbacks = callbacks) +} + +#' @title Syntactic Sugar for Asynchronous Optimization Instance Construction +#' +#' @description +#' Function to construct an [OptimInstanceAsyncSingleCrit] and [OptimInstanceAsyncMultiCrit]. +#' +#' @template param_objective +#' @template param_search_space +#' @template param_terminator +#' @template param_check_values +#' @template param_callbacks +#' @template param_rush +#' +#' @export +oi_async = function( + objective, + search_space = NULL, + terminator, + check_values = FALSE, + callbacks = NULL, + rush = NULL + ) { + assert_r6(objective, "Objective") + + Instance = if (objective$codomain$target_length == 1) OptimInstanceAsyncSingleCrit else OptimInstanceAsyncMultiCrit + Instance$new( + objective = objective, + search_space = search_space, + terminator = terminator, + check_values = check_values, + callbacks = callbacks, + rush = rush) +} diff --git a/R/worker_loops.R b/R/worker_loops.R new file mode 100644 index 000000000..09b2ec378 --- /dev/null +++ b/R/worker_loops.R @@ -0,0 +1,28 @@ +#' @title Worker loop for Rush +#' +#' @description +#' Loop run on the workers. +#' Pops a task from the queue and evaluates it with the objective function. +#' Pushes the results back to the data base. +#' +#' @template param_rush +#' +#' @param optimizer [OptimizerAsync]. +#' @param instance [OptimInstanceAsync]. 
+#' +#' @keywords internal +#' @export +bbotk_worker_loop = function(rush, optimizer, instance) { + # replace controller with worker + instance$rush = rush + instance$archive$rush = rush + + call_back("on_worker_begin", instance$objective$callbacks, instance$objective$context) + + # run optimizer loop + get_private(optimizer)$.optimize(instance) + + call_back("on_worker_end", instance$objective$callbacks, instance$objective$context) + + return(NULL) +} diff --git a/R/zzz.R b/R/zzz.R index bcf2f04fb..7996bce2a 100644 --- a/R/zzz.R +++ b/R/zzz.R @@ -2,6 +2,7 @@ #' @import checkmate #' @import paradox #' @import mlr3misc +#' @import rush #' @importFrom R6 R6Class #' @importFrom utils capture.output head tail #' @importFrom methods formalArgs @@ -34,5 +35,4 @@ } } # nocov end - leanify_package() diff --git a/README.Rmd b/README.Rmd index 4f46b8b41..6f4415dd4 100644 --- a/README.Rmd +++ b/README.Rmd @@ -88,7 +88,7 @@ objective = ObjectiveRFun$new( terminator = trm("evals", n_evals = 10) # create optimization instance -instance = OptimInstanceSingleCrit$new( +instance = OptimInstanceBatchSingleCrit$new( objective = objective, terminator = terminator ) diff --git a/README.md b/README.md index b0426dc6e..f183c3cd1 100644 --- a/README.md +++ b/README.md @@ -91,7 +91,7 @@ objective = ObjectiveRFun$new( terminator = trm("evals", n_evals = 10) # create optimization instance -instance = OptimInstanceSingleCrit$new( +instance = OptimInstanceBatchSingleCrit$new( objective = objective, terminator = terminator ) @@ -157,5 +157,5 @@ result$par result$value ``` - ## y1 + ## y1 ## -142.5479 diff --git a/inst/WORDLIST b/inst/WORDLIST index 467296073..518528c56 100644 --- a/inst/WORDLIST +++ b/inst/WORDLIST @@ -4,10 +4,10 @@ Birattari Branin CMA Caceres -CallbackOptimization -CallbackOptimizations +CallbackBatch +CallbackBatchs Codomain -ContextOptimization +ContextBatch Dubois GenSA Gubian @@ -19,7 +19,7 @@ Mattermost NLopt ORCID OptimInstance -OptimizerGridSearch +OptimizerBatchGridSearch POSIXct Palmerin Param diff --git a/man-roxygen/example.R b/man-roxygen/example.R index 0456848c7..64578e699 100644 --- a/man-roxygen/example.R +++ b/man-roxygen/example.R @@ -12,7 +12,7 @@ #' domain = domain, #' codomain = codomain) #' -#' instance = OptimInstanceSingleCrit$new( +#' instance = OptimInstanceBatchSingleCrit$new( #' objective = objective, #' search_space = search_space, #' terminator = trm("evals", n_evals = 10)) diff --git a/man-roxygen/field_archive.R b/man-roxygen/field_archive.R new file mode 100644 index 000000000..5e1e33358 --- /dev/null +++ b/man-roxygen/field_archive.R @@ -0,0 +1,2 @@ +#' @field archive ([Archive])\cr +#' Contains all performed function calls of the Objective. diff --git a/man-roxygen/field_callbacks.R b/man-roxygen/field_callbacks.R new file mode 100644 index 000000000..6261e8c85 --- /dev/null +++ b/man-roxygen/field_callbacks.R @@ -0,0 +1,2 @@ +#' @field callbacks (list of [mlr3misc::Callback])\cr +#' Callbacks applied during the optimization. diff --git a/man-roxygen/field_codomain.R b/man-roxygen/field_codomain.R new file mode 100644 index 000000000..8c4907f7b --- /dev/null +++ b/man-roxygen/field_codomain.R @@ -0,0 +1,3 @@ +#' @field codomain ([Codomain])\cr +#' Codomain of objective function. + diff --git a/man-roxygen/field_context.R b/man-roxygen/field_context.R new file mode 100644 index 000000000..3c68b10fa --- /dev/null +++ b/man-roxygen/field_context.R @@ -0,0 +1,2 @@ +#' @field context ([Context])\cr +#' Stores the context for the callbacks. 
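The `oi()` helper added to sugar.R above chooses between `OptimInstanceBatchSingleCrit` and `OptimInstanceBatchMultiCrit` based on the codomain length. A minimal sketch of the intended call; the toy objective is illustrative, mirroring the README example:

    library(bbotk)
    library(paradox)

    objective = ObjectiveRFun$new(
      fun = function(xs) list(y = xs$x1^2 + xs$x2^2),
      domain = ps(x1 = p_dbl(-5, 5), x2 = p_dbl(-5, 5)),
      codomain = ps(y = p_dbl(tags = "minimize")))

    # single-element codomain, so oi() constructs an OptimInstanceBatchSingleCrit
    instance = oi(
      objective = objective,
      terminator = trm("evals", n_evals = 20))

    opt("grid_search")$optimize(instance)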
diff --git a/man-roxygen/field_objective.R b/man-roxygen/field_objective.R new file mode 100644 index 000000000..94a6dc75b --- /dev/null +++ b/man-roxygen/field_objective.R @@ -0,0 +1,2 @@ +#' @field objective ([Objective])\cr +#' Objective function of the instance. diff --git a/man-roxygen/field_progressor.R b/man-roxygen/field_progressor.R new file mode 100644 index 000000000..ac67192f4 --- /dev/null +++ b/man-roxygen/field_progressor.R @@ -0,0 +1,2 @@ +#' @field progressor (`progressor()`)\cr +#' Stores `progressor` function. diff --git a/man-roxygen/field_restart_lost_workers.R b/man-roxygen/field_restart_lost_workers.R new file mode 100644 index 000000000..a566f735c --- /dev/null +++ b/man-roxygen/field_restart_lost_workers.R @@ -0,0 +1,2 @@ +#' @field restart_lost_workers (`logical(1)`)\cr +#' If `TRUE`, the instance will restart lost workers. diff --git a/man-roxygen/field_rush.R b/man-roxygen/field_rush.R new file mode 100644 index 000000000..f3fc150d9 --- /dev/null +++ b/man-roxygen/field_rush.R @@ -0,0 +1,2 @@ +#' @field rush (`Rush`)\cr +#' Rush controller for parallel optimization. diff --git a/man-roxygen/field_search_space.R b/man-roxygen/field_search_space.R new file mode 100644 index 000000000..c77cb329b --- /dev/null +++ b/man-roxygen/field_search_space.R @@ -0,0 +1,2 @@ +#' @field search_space ([paradox::ParamSet])\cr +#' Specification of the search space for the [Optimizer]. diff --git a/man-roxygen/field_start_time.R b/man-roxygen/field_start_time.R new file mode 100644 index 000000000..0bc785de8 --- /dev/null +++ b/man-roxygen/field_start_time.R @@ -0,0 +1,4 @@ +#' @field start_time ([POSIXct])\cr +#' Time stamp of when the optimization started. +#' The time is set by the [Optimizer]. +#' diff --git a/man-roxygen/field_terminator.R b/man-roxygen/field_terminator.R new file mode 100644 index 000000000..57a5a1030 --- /dev/null +++ b/man-roxygen/field_terminator.R @@ -0,0 +1,2 @@ +#' @field terminator [Terminator]\cr +#' Termination criterion of the optimization. diff --git a/man-roxygen/param_await_workers.R b/man-roxygen/param_await_workers.R new file mode 100644 index 000000000..8cb8a1d64 --- /dev/null +++ b/man-roxygen/param_await_workers.R @@ -0,0 +1,2 @@ +#' @param await_workers (`logical(1)`)\cr +#' Whether to wait until all workers are available. diff --git a/man-roxygen/param_check_values.R b/man-roxygen/param_check_values.R index 2f3ce3629..399c0aafb 100644 --- a/man-roxygen/param_check_values.R +++ b/man-roxygen/param_check_values.R @@ -1,3 +1,2 @@ #' @param check_values (`logical(1)`)\cr -#' Should points before the evaluation and the results be checked for -#' validity? +#' Should points before the evaluation and the results be checked for validity? diff --git a/man-roxygen/param_detect_lost_tasks.R b/man-roxygen/param_detect_lost_tasks.R new file mode 100644 index 000000000..ebaace719 --- /dev/null +++ b/man-roxygen/param_detect_lost_tasks.R @@ -0,0 +1,3 @@ +#' @param detect_lost_tasks (`logical(1)`)\cr +#' Whether to detect lost tasks. +#' Defaults to `FALSE`. diff --git a/man-roxygen/param_freeze_archive.R b/man-roxygen/param_freeze_archive.R new file mode 100644 index 000000000..ca0bbe750 --- /dev/null +++ b/man-roxygen/param_freeze_archive.R @@ -0,0 +1,4 @@ +#' @param freeze_archive (`logical(1)`)\cr +#' If `TRUE`, the archive is copied from redis to a local data.table after tuning. +#' This is helpful when the tuning is run on a remote machine and the archive is serialized. +#' Only used if `rush` is supplied. 
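The parameter and field templates above (`rush`, `restart_lost_workers`, `await_workers`, `detect_lost_tasks`, `freeze_archive`, ...) all belong to the new asynchronous code path. A rough, non-runnable sketch of how the pieces fit together; `rush_controller` and `optimizer_async` are placeholders (a rush controller created with the rush package and any `OptimizerAsync` subclass), not names defined by this patch:

    # sketch only: requires a running Redis instance and the rush package
    # rush_controller = a rush controller object (its construction is not part of this patch)
    instance = oi_async(
      objective = objective,            # an Objective, e.g. built with ObjectiveRFun$new()
      terminator = trm("evals", n_evals = 100),
      rush = rush_controller)

    # optimizer_async stands for any OptimizerAsync subclass; on each worker,
    # bbotk_worker_loop(rush, optimizer, instance) pops queued points, evaluates
    # them with the objective, and pushes the results back to the Redis data base
    optimizer_async$optimize(instance)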
diff --git a/man-roxygen/param_heartbeat_expire.R b/man-roxygen/param_heartbeat_expire.R new file mode 100644 index 000000000..c78b50404 --- /dev/null +++ b/man-roxygen/param_heartbeat_expire.R @@ -0,0 +1,2 @@ +#' @param heartbeat_expire (`integer(1)`)\cr +#' Time to live of the heartbeat in seconds. diff --git a/man-roxygen/param_heartbeat_period.R b/man-roxygen/param_heartbeat_period.R new file mode 100644 index 000000000..602697ccf --- /dev/null +++ b/man-roxygen/param_heartbeat_period.R @@ -0,0 +1,2 @@ +#' @param heartbeat_period (`integer(1)`)\cr +#' Period of the heartbeat in seconds. diff --git a/man-roxygen/param_host.R b/man-roxygen/param_host.R new file mode 100644 index 000000000..ad6c7a10a --- /dev/null +++ b/man-roxygen/param_host.R @@ -0,0 +1,2 @@ +#' @param host (`character(1)`)\cr +#' Worker is started on a local or remote host. diff --git a/man-roxygen/param_lgr_thresholds.R b/man-roxygen/param_lgr_thresholds.R new file mode 100644 index 000000000..6aedd0cc9 --- /dev/null +++ b/man-roxygen/param_lgr_thresholds.R @@ -0,0 +1,3 @@ +#' @param lgr_thresholds (named `character()` or `numeric()`)\cr +#' Thresholds for the `lgr` package e.g. c(rush = "debug", bbotk = "info"). +#' If `NULL`, no log messages from the workers are saved. diff --git a/man-roxygen/param_n_workers.R b/man-roxygen/param_n_workers.R new file mode 100644 index 000000000..7e82ba44e --- /dev/null +++ b/man-roxygen/param_n_workers.R @@ -0,0 +1,3 @@ +#' @param n_workers (`integer(1)`)\cr +#' Number of workers to be started. +#' If `NULL` the maximum number of free workers is used. diff --git a/man-roxygen/param_null_ok.R b/man-roxygen/param_null_ok.R new file mode 100644 index 000000000..046a7a1b8 --- /dev/null +++ b/man-roxygen/param_null_ok.R @@ -0,0 +1,2 @@ +#' @param null_ok (`logical(1)`)\cr +#' Is `NULL` a valid value? diff --git a/man-roxygen/param_objective.R b/man-roxygen/param_objective.R new file mode 100644 index 000000000..305e14475 --- /dev/null +++ b/man-roxygen/param_objective.R @@ -0,0 +1,2 @@ +#' @param objective ([Objective])\cr +#' Objective function. diff --git a/man-roxygen/param_packages.R b/man-roxygen/param_packages.R new file mode 100644 index 000000000..7358561bf --- /dev/null +++ b/man-roxygen/param_packages.R @@ -0,0 +1,2 @@ +#' @param packages (`character()`)\cr +#' Packages to be loaded by the workers. diff --git a/man-roxygen/param_rush.R b/man-roxygen/param_rush.R new file mode 100644 index 000000000..c4441b331 --- /dev/null +++ b/man-roxygen/param_rush.R @@ -0,0 +1,2 @@ +#' @param rush (`Rush`)\cr +#' If a rush instance is supplied, the tuning runs without batches. diff --git a/man-roxygen/param_start_workers.R b/man-roxygen/param_start_workers.R new file mode 100644 index 000000000..cbec8d314 --- /dev/null +++ b/man-roxygen/param_start_workers.R @@ -0,0 +1,2 @@ +#' @param start_workers (`logical(1)`)\cr +#' If `TRUE`, the workers are started immediately with `future` on all available cores. diff --git a/man-roxygen/param_terminator.R b/man-roxygen/param_terminator.R new file mode 100644 index 000000000..83fee73cd --- /dev/null +++ b/man-roxygen/param_terminator.R @@ -0,0 +1,2 @@ +#' @param terminator [Terminator]\cr +#' Termination criterion. 
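A short usage note on the terminator helpers added earlier in this patch: `as_terminator()` and `as_terminators()` normalize user input, and `assert_terminators()` validates the result. A minimal sketch:

    library(bbotk)

    # normalize a plain list to a list of Terminator objects and validate it
    terminators = as_terminators(list(
      trm("evals", n_evals = 100),
      trm("run_time", secs = 60)))
    assert_terminators(terminators)

    # clone = TRUE guarantees the returned terminator is a new object
    term = as_terminator(trm("evals", n_evals = 50), clone = TRUE)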
diff --git a/man/Archive.Rd b/man/Archive.Rd index 01ad8705e..009bfd6b0 100644 --- a/man/Archive.Rd +++ b/man/Archive.Rd @@ -2,55 +2,34 @@ % Please edit documentation in R/Archive.R \name{Archive} \alias{Archive} -\title{Logging object for objective function evaluations} +\title{Data Storage} \description{ -Container around a \link[data.table:data.table]{data.table::data.table} which stores all performed -function calls of the Objective. +The \link{Archive} class stores all proposed points and their corresponding evaluations. } -\section{S3 Methods}{ - -\itemize{ -\item \code{as.data.table(archive)}\cr -\link{Archive} -> \code{\link[data.table:data.table]{data.table::data.table()}}\cr -Returns a tabular view of all performed function calls of the -Objective. The \code{x_domain} column is unnested to separate columns. +\details{ +The \link{Archive} is an abstract class that implements the base functionality each archive must provide. } -} - \section{Public fields}{ \if{html}{\out{
}} \describe{ \item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr -Search space of objective.} +Specification of the search space for the \link{Optimizer}.} \item{\code{codomain}}{(\link{Codomain})\cr Codomain of objective function.} \item{\code{start_time}}{(\link{POSIXct})\cr -Time stamp of when the optimization started. The time is set by the -\link{Optimizer}.} +Time stamp of when the optimization started. +The time is set by the \link{Optimizer}.} \item{\code{check_values}}{(\code{logical(1)})\cr Determines if points and results are checked for validity.} - -\item{\code{data}}{(\link[data.table:data.table]{data.table::data.table})\cr -Contains all performed \link{Objective} function calls.} - -\item{\code{data_extra}}{(named \code{list})\cr -Data created by specific \code{\link{Optimizer}}s that does not relate to any individual function evaluation and can therefore not be held in \verb{$data}. -Every optimizer should create and refer to its own entry in this list, named by its \code{class()}.} } \if{html}{\out{
}} } \section{Active bindings}{ \if{html}{\out{
}} \describe{ -\item{\code{n_evals}}{(\code{integer(1)})\cr -Number of evaluations stored in the archive.} - -\item{\code{n_batch}}{(\code{integer(1)})\cr -Number of batches stored in the archive.} - \item{\code{cols_x}}{(\code{character()})\cr Column names of search space parameters.} @@ -63,9 +42,6 @@ Column names of codomain target parameters.} \subsection{Public methods}{ \itemize{ \item \href{#method-Archive-new}{\code{Archive$new()}} -\item \href{#method-Archive-add_evals}{\code{Archive$add_evals()}} -\item \href{#method-Archive-best}{\code{Archive$best()}} -\item \href{#method-Archive-nds_selection}{\code{Archive$nds_selection()}} \item \href{#method-Archive-format}{\code{Archive$format()}} \item \href{#method-Archive-print}{\code{Archive$print()}} \item \href{#method-Archive-clear}{\code{Archive$clear()}} @@ -78,7 +54,7 @@ Column names of codomain target parameters.} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{Archive$new(search_space, codomain, check_values = TRUE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{Archive$new(search_space, codomain, check_values = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ @@ -104,95 +80,6 @@ Search space that is logged into archive.} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-Archive-add_evals}{}}} -\subsection{Method \code{add_evals()}}{ -Adds function evaluations to the archive table. -\subsection{Usage}{ -\if{html}{\out{
}}\preformatted{Archive$add_evals(xdt, xss_trafoed = NULL, ydt)}\if{html}{\out{
}} -} - -\subsection{Arguments}{ -\if{html}{\out{
}} -\describe{ -\item{\code{xdt}}{(\code{\link[data.table:data.table]{data.table::data.table()}})\cr -Set of untransformed points / points from the \emph{search space}. -One point per row, e.g. \code{data.table(x1 = c(1, 3), x2 = c(2, 4))}. -Column names have to match ids of the \code{search_space}. -However, \code{xdt} can contain additional columns.} - -\item{\code{xss_trafoed}}{(\code{list()})\cr -Transformed point(s) in the \emph{domain space}.} - -\item{\code{ydt}}{(\code{\link[data.table:data.table]{data.table::data.table()}})\cr -Optimal outcome.} -} -\if{html}{\out{
}} -} -} -\if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-Archive-best}{}}} -\subsection{Method \code{best()}}{ -Returns the best scoring evaluation(s). -For single-crit optimization, the solution that minimizes / maximizes the objective function. -For multi-crit optimization, the Pareto set / front. -\subsection{Usage}{ -\if{html}{\out{
}}\preformatted{Archive$best(batch = NULL, n_select = 1L, ties_method = "first")}\if{html}{\out{
}} -} - -\subsection{Arguments}{ -\if{html}{\out{
}} -\describe{ -\item{\code{batch}}{(\code{integer()})\cr -The batch number(s) to limit the best results to. -Default is all batches.} - -\item{\code{n_select}}{(\code{integer(1L)})\cr -Amount of points to select. -Ignored for multi-crit optimization.} - -\item{\code{ties_method}}{(\code{character(1L)})\cr -Method to break ties when multiple points have the same score. -Either \code{"first"} (default) or \code{"random"}. -Ignored for multi-crit optimization. -If \code{n_select > 1L}, the tie method is ignored and the first point is returned.} -} -\if{html}{\out{
}} -} -\subsection{Returns}{ -\code{\link[data.table:data.table]{data.table::data.table()}} -} -} -\if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-Archive-nds_selection}{}}} -\subsection{Method \code{nds_selection()}}{ -Calculate best points w.r.t. non dominated sorting with hypervolume contribution. -\subsection{Usage}{ -\if{html}{\out{
}}\preformatted{Archive$nds_selection(batch = NULL, n_select = 1, ref_point = NULL)}\if{html}{\out{
}} -} - -\subsection{Arguments}{ -\if{html}{\out{
}} -\describe{ -\item{\code{batch}}{(\code{integer()})\cr -The batch number(s) to limit the best points to. Default is -all batches.} - -\item{\code{n_select}}{(\code{integer(1L)})\cr -Amount of points to select.} - -\item{\code{ref_point}}{(\code{numeric()})\cr -Reference point for hypervolume.} -} -\if{html}{\out{
}} -} -\subsection{Returns}{ -\code{\link[data.table:data.table]{data.table::data.table()}} -} -} -\if{html}{\out{
}} \if{html}{\out{}} \if{latex}{\out{\hypertarget{method-Archive-format}{}}} \subsection{Method \code{format()}}{ diff --git a/man/ArchiveAsync.Rd b/man/ArchiveAsync.Rd new file mode 100644 index 000000000..5c67c56ae --- /dev/null +++ b/man/ArchiveAsync.Rd @@ -0,0 +1,312 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ArchiveAsync.R +\name{ArchiveAsync} +\alias{ArchiveAsync} +\title{Rush Data Storage} +\description{ +The \code{ArchiveAsync} stores all evaluated points and performance scores in a \link[rush:Rush]{rush::Rush} data base. +} +\section{S3 Methods}{ + +\itemize{ +\item \code{as.data.table(archive)}\cr +\link{ArchiveAsync} -> \code{\link[data.table:data.table]{data.table::data.table()}}\cr +Returns a tabular view of all performed function calls of the Objective. +The \code{x_domain} column is unnested to separate columns. +} +} + +\section{Super class}{ +\code{\link[bbotk:Archive]{bbotk::Archive}} -> \code{ArchiveAsync} +} +\section{Public fields}{ +\if{html}{\out{
}} +\describe{ +\item{\code{rush}}{(\code{Rush})\cr +Rush controller for parallel optimization.} +} +\if{html}{\out{
}} +} +\section{Active bindings}{ +\if{html}{\out{
}} +\describe{ +\item{\code{data}}{(\link[data.table:data.table]{data.table::data.table})\cr +Data table with all finished points.} + +\item{\code{queued_data}}{(\link[data.table:data.table]{data.table::data.table})\cr +Data table with all queued points.} + +\item{\code{running_data}}{(\link[data.table:data.table]{data.table::data.table})\cr +Data table with all running points.} + +\item{\code{finished_data}}{(\link[data.table:data.table]{data.table::data.table})\cr +Data table with all finished points.} + +\item{\code{failed_data}}{(\link[data.table:data.table]{data.table::data.table})\cr +Data table with all failed points.} + +\item{\code{n_queued}}{(\code{integer(1)})\cr +Number of queued points.} + +\item{\code{n_running}}{(\code{integer(1)})\cr +Number of running points.} + +\item{\code{n_finished}}{(\code{integer(1)})\cr +Number of finished points.} + +\item{\code{n_failed}}{(\code{integer(1)})\cr +Number of failed points.} + +\item{\code{n_evals}}{(\code{integer(1)})\cr +Number of evaluations stored in the archive.} +} +\if{html}{\out{
}} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-ArchiveAsync-new}{\code{ArchiveAsync$new()}} +\item \href{#method-ArchiveAsync-push_points}{\code{ArchiveAsync$push_points()}} +\item \href{#method-ArchiveAsync-pop_point}{\code{ArchiveAsync$pop_point()}} +\item \href{#method-ArchiveAsync-push_running_point}{\code{ArchiveAsync$push_running_point()}} +\item \href{#method-ArchiveAsync-push_result}{\code{ArchiveAsync$push_result()}} +\item \href{#method-ArchiveAsync-push_failed_point}{\code{ArchiveAsync$push_failed_point()}} +\item \href{#method-ArchiveAsync-data_with_state}{\code{ArchiveAsync$data_with_state()}} +\item \href{#method-ArchiveAsync-best}{\code{ArchiveAsync$best()}} +\item \href{#method-ArchiveAsync-nds_selection}{\code{ArchiveAsync$nds_selection()}} +\item \href{#method-ArchiveAsync-clear}{\code{ArchiveAsync$clear()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveAsync-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveAsync$new(search_space, codomain, check_values = FALSE, rush)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} +describes either a subset of the \code{domain} of the \link{Objective} or it describes +a set of parameters together with a \code{trafo} function that transforms values +from the search space to values of the domain. Depending on the context, this +value defaults to the domain of the objective.} + +\item{\code{codomain}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specifies codomain of function. +Most importantly the tags of each output "Parameter" define whether it should +be minimized or maximized. The default is to minimize each component.} + +\item{\code{check_values}}{(\code{logical(1)})\cr +Should points before the evaluation and the results be checked for validity?} + +\item{\code{rush}}{(\code{Rush})\cr +If a rush instance is supplied, the tuning runs without batches.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveAsync-push_points}{}}} +\subsection{Method \code{push_points()}}{ +Push queued points to the archive. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveAsync$push_points(xss)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{xss}}{(list of named \code{list()})\cr +List of named lists of point values.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveAsync-pop_point}{}}} +\subsection{Method \code{pop_point()}}{ +Pop a point from the queue. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveAsync$pop_point()}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveAsync-push_running_point}{}}} +\subsection{Method \code{push_running_point()}}{ +Push running point to the archive. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveAsync$push_running_point(xs, extra = NULL)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{xs}}{(named \code{list})\cr +Named list of point values.} + +\item{\code{extra}}{(\code{list()})\cr +Named list of additional information.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveAsync-push_result}{}}} +\subsection{Method \code{push_result()}}{ +Push result to the archive. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveAsync$push_result(key, ys, x_domain, extra = NULL)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{key}}{(\code{character()})\cr +Key of the point.} + +\item{\code{ys}}{(\code{list()})\cr +Named list of results.} + +\item{\code{x_domain}}{(\code{list()})\cr +Named list of transformed point values.} + +\item{\code{extra}}{(\code{list()})\cr +Named list of additional information.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveAsync-push_failed_point}{}}} +\subsection{Method \code{push_failed_point()}}{ +Push failed point to the archive. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveAsync$push_failed_point(key, message)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{key}}{(\code{character()})\cr +Key of the point.} + +\item{\code{message}}{(\code{character()})\cr +Error message.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveAsync-data_with_state}{}}} +\subsection{Method \code{data_with_state()}}{ +Fetch points with a specific state. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveAsync$data_with_state( + fields = c("xs", "ys", "xs_extra", "worker_extra", "ys_extra", "condition"), + states = c("queued", "running", "finished", "failed"), + reset_cache = FALSE +)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{fields}}{(\code{character()})\cr +Fields to fetch. +Defaults to \code{c("xs", "ys", "xs_extra", "worker_extra", "ys_extra")}.} + +\item{\code{states}}{(\code{character()})\cr +States of the tasks to be fetched. +Defaults to \code{c("queued", "running", "finished", "failed")}.} + +\item{\code{reset_cache}}{(\code{logical(1)})\cr +Whether to reset the cache of the finished points.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveAsync-best}{}}} +\subsection{Method \code{best()}}{ +Returns the best scoring evaluation(s). +For single-crit optimization, the solution that minimizes / maximizes the objective function. +For multi-crit optimization, the Pareto set / front. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveAsync$best(n_select = 1, ties_method = "first")}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{n_select}}{(\code{integer(1L)})\cr +Amount of points to select. +Ignored for multi-crit optimization.} + +\item{\code{ties_method}}{(\code{character(1L)})\cr +Method to break ties when multiple points have the same score. +Either \code{"first"} (default) or \code{"random"}. +Ignored for multi-crit optimization. +If \code{n_select > 1L}, the tie method is ignored and the first point is returned.} +} +\if{html}{\out{
}} +} +\subsection{Returns}{ +\code{\link[data.table:data.table]{data.table::data.table()}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveAsync-nds_selection}{}}} +\subsection{Method \code{nds_selection()}}{ +Calculate best points w.r.t. non dominated sorting with hypervolume contribution. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveAsync$nds_selection(n_select = 1, ref_point = NULL)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{n_select}}{(\code{integer(1L)})\cr +Amount of points to select.} + +\item{\code{ref_point}}{(\code{numeric()})\cr +Reference point for hypervolume.} +} +\if{html}{\out{
}} +} +\subsection{Returns}{ +\code{\link[data.table:data.table]{data.table::data.table()}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveAsync-clear}{}}} +\subsection{Method \code{clear()}}{ +Clear all evaluation results from archive. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveAsync$clear()}\if{html}{\out{
}} +} + +} +} diff --git a/man/ArchiveBatch.Rd b/man/ArchiveBatch.Rd new file mode 100644 index 000000000..ea303b5e1 --- /dev/null +++ b/man/ArchiveBatch.Rd @@ -0,0 +1,211 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ArchiveBatch.R +\name{ArchiveBatch} +\alias{ArchiveBatch} +\title{Data Table Storage} +\description{ +The \code{ArchiveBatch} stores all evaluated points and performance scores in a \code{\link[data.table:data.table]{data.table::data.table()}}. +} +\section{S3 Methods}{ + +\itemize{ +\item \code{as.data.table(archive)}\cr +\link{ArchiveBatch} -> \code{\link[data.table:data.table]{data.table::data.table()}}\cr +Returns a tabular view of all performed function calls of the Objective. +The \code{x_domain} column is unnested to separate columns. +} +} + +\section{Super class}{ +\code{\link[bbotk:Archive]{bbotk::Archive}} -> \code{ArchiveBatch} +} +\section{Public fields}{ +\if{html}{\out{
}} +\describe{ +\item{\code{data}}{(\link[data.table:data.table]{data.table::data.table})\cr +Contains all performed \link{Objective} function calls.} + +\item{\code{data_extra}}{(named \code{list})\cr +Data created by specific \code{\link{Optimizer}}s that does not relate to any individual function evaluation and can therefore not be held in \verb{$data}. +Every optimizer should create and refer to its own entry in this list, named by its \code{class()}.} +} +\if{html}{\out{
}} +} +\section{Active bindings}{ +\if{html}{\out{
}} +\describe{ +\item{\code{n_evals}}{(\code{integer(1)})\cr +Number of evaluations stored in the archive.} + +\item{\code{n_batch}}{(\code{integer(1)})\cr +Number of batches stored in the archive.} +} +\if{html}{\out{
}} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-ArchiveBatch-new}{\code{ArchiveBatch$new()}} +\item \href{#method-ArchiveBatch-add_evals}{\code{ArchiveBatch$add_evals()}} +\item \href{#method-ArchiveBatch-best}{\code{ArchiveBatch$best()}} +\item \href{#method-ArchiveBatch-nds_selection}{\code{ArchiveBatch$nds_selection()}} +\item \href{#method-ArchiveBatch-clear}{\code{ArchiveBatch$clear()}} +\item \href{#method-ArchiveBatch-clone}{\code{ArchiveBatch$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveBatch-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveBatch$new(search_space, codomain, check_values = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} +describes either a subset of the \code{domain} of the \link{Objective} or it describes +a set of parameters together with a \code{trafo} function that transforms values +from the search space to values of the domain. Depending on the context, this +value defaults to the domain of the objective.} + +\item{\code{codomain}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specifies codomain of function. +Most importantly the tags of each output "Parameter" define whether it should +be minimized or maximized. The default is to minimize each component.} + +\item{\code{check_values}}{(\code{logical(1)})\cr +Should x-values that are added to the archive be checked for validity? +Search space that is logged into archive.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveBatch-add_evals}{}}} +\subsection{Method \code{add_evals()}}{ +Adds function evaluations to the archive table. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveBatch$add_evals(xdt, xss_trafoed = NULL, ydt)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{xdt}}{(\code{\link[data.table:data.table]{data.table::data.table()}})\cr +Set of untransformed points / points from the \emph{search space}. +One point per row, e.g. \code{data.table(x1 = c(1, 3), x2 = c(2, 4))}. +Column names have to match ids of the \code{search_space}. +However, \code{xdt} can contain additional columns.} + +\item{\code{xss_trafoed}}{(\code{list()})\cr +Transformed point(s) in the \emph{domain space}.} + +\item{\code{ydt}}{(\code{\link[data.table:data.table]{data.table::data.table()}})\cr +Optimal outcome.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveBatch-best}{}}} +\subsection{Method \code{best()}}{ +Returns the best scoring evaluation(s). +For single-crit optimization, the solution that minimizes / maximizes the objective function. +For multi-crit optimization, the Pareto set / front. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveBatch$best(batch = NULL, n_select = 1L, ties_method = "first")}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{batch}}{(\code{integer()})\cr +The batch number(s) to limit the best results to. +Default is all batches.} + +\item{\code{n_select}}{(\code{integer(1L)})\cr +Number of points to select. +Ignored for multi-crit optimization.} + +\item{\code{ties_method}}{(\code{character(1L)})\cr +Method to break ties when multiple points have the same score. +Either \code{"first"} (default) or \code{"random"}. +Ignored for multi-crit optimization. +If \code{n_select > 1L}, the tie method is ignored and the first point is returned.} +} +\if{html}{\out{
}} +} +\subsection{Returns}{ +\code{\link[data.table:data.table]{data.table::data.table()}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveBatch-nds_selection}{}}} +\subsection{Method \code{nds_selection()}}{ +Calculate best points w.r.t. non dominated sorting with hypervolume contribution. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveBatch$nds_selection(batch = NULL, n_select = 1, ref_point = NULL)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{batch}}{(\code{integer()})\cr +The batch number(s) to limit the best points to. Default is +all batches.} + +\item{\code{n_select}}{(\code{integer(1L)})\cr +Number of points to select.} + +\item{\code{ref_point}}{(\code{numeric()})\cr +Reference point for hypervolume.} +} +\if{html}{\out{
}} +} +\subsection{Returns}{ +\code{\link[data.table:data.table]{data.table::data.table()}} +} +} +\if{html}{\out{
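A brief sketch of \code{$nds_selection()} on a two-objective archive; the values are invented, and the call assumes the same \code{search_space} as the sketch above and that a hypervolume backend (e.g. the emoa package) is available.
\preformatted{codomain = ps(y1 = p_dbl(tags = "minimize"), y2 = p_dbl(tags = "minimize"))
archive = ArchiveBatch$new(search_space, codomain)
archive$add_evals(
  xdt = data.table(x1 = c(1, 3), x2 = c(2, 4)),
  ydt = data.table(y1 = c(1, 9), y2 = c(4, 16))
)

# select the point with the largest hypervolume contribution
archive$nds_selection(n_select = 1, ref_point = c(10, 20))
}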
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveBatch-clear}{}}} +\subsection{Method \code{clear()}}{ +Clear all evaluation results from archive. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveBatch$clear()}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ArchiveBatch-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ArchiveBatch$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/ArchiveBest.Rd b/man/ArchiveBest.Rd deleted file mode 100644 index fa80a75d5..000000000 --- a/man/ArchiveBest.Rd +++ /dev/null @@ -1,143 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ArchiveBest.R -\name{ArchiveBest} -\alias{ArchiveBest} -\title{Minimal logging object for objective function evaluations} -\description{ -The \link{ArchiveBest} stores no data but records the best scoring evaluation -passed to \verb{$add_evals()}. The \link{Archive} API is fully implemented but many -parameters are ignored and some methods do nothing. The archive still works -with \link{TerminatorClockTime}, \link{TerminatorEvals}, \link{TerminatorNone} and -\link{TerminatorEvals}. -} -\section{Super class}{ -\code{\link[bbotk:Archive]{bbotk::Archive}} -> \code{ArchiveBest} -} -\section{Active bindings}{ -\if{html}{\out{
}} -\describe{ -\item{\code{n_evals}}{(\code{integer(1)})\cr -Number of evaluations stored in the archive.} - -\item{\code{n_batch}}{(\code{integer(1)})\cr -Number of batches stored in the archive.} -} -\if{html}{\out{
}} -} -\section{Methods}{ -\subsection{Public methods}{ -\itemize{ -\item \href{#method-ArchiveBest-new}{\code{ArchiveBest$new()}} -\item \href{#method-ArchiveBest-add_evals}{\code{ArchiveBest$add_evals()}} -\item \href{#method-ArchiveBest-best}{\code{ArchiveBest$best()}} -\item \href{#method-ArchiveBest-clone}{\code{ArchiveBest$clone()}} -} -} -\if{html}{\out{ -
Inherited methods - -
-}} -\if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-ArchiveBest-new}{}}} -\subsection{Method \code{new()}}{ -Creates a new instance of this \link[R6:R6Class]{R6} class. -\subsection{Usage}{ -\if{html}{\out{
}}\preformatted{ArchiveBest$new(search_space, codomain, check_values = FALSE)}\if{html}{\out{
}} -} - -\subsection{Arguments}{ -\if{html}{\out{
}} -\describe{ -\item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr -Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} -describes either a subset of the \code{domain} of the \link{Objective} or it describes -a set of parameters together with a \code{trafo} function that transforms values -from the search space to values of the domain. Depending on the context, this -value defaults to the domain of the objective.} - -\item{\code{codomain}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr -Specifies codomain of function. -Most importantly the tags of each output "Parameter" define whether it should -be minimized or maximized. The default is to minimize each component.} - -\item{\code{check_values}}{(\code{logical(1)})\cr -ignored.} -} -\if{html}{\out{
}} -} -} -\if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-ArchiveBest-add_evals}{}}} -\subsection{Method \code{add_evals()}}{ -Stores the best result in \code{ydt}. -\subsection{Usage}{ -\if{html}{\out{
}}\preformatted{ArchiveBest$add_evals(xdt, xss_trafoed = NULL, ydt)}\if{html}{\out{
}} -} - -\subsection{Arguments}{ -\if{html}{\out{
}} -\describe{ -\item{\code{xdt}}{(\code{\link[data.table:data.table]{data.table::data.table()}})\cr -Set of untransformed points / points from the \emph{search space}. -One point per row, e.g. \code{data.table(x1 = c(1, 3), x2 = c(2, 4))}. -Column names have to match ids of the \code{search_space}. -However, \code{xdt} can contain additional columns.} - -\item{\code{xss_trafoed}}{(\code{list()})\cr -Transformed point(s) in the \emph{domain space}.} - -\item{\code{ydt}}{(\code{\link[data.table:data.table]{data.table::data.table()}})\cr -Optimal outcome.} -} -\if{html}{\out{
}} -} -} -\if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-ArchiveBest-best}{}}} -\subsection{Method \code{best()}}{ -Returns the best scoring evaluation. For single-crit optimization, -the solution that minimizes / maximizes the objective function. -For multi-crit optimization, the Pareto set / front. -\subsection{Usage}{ -\if{html}{\out{
}}\preformatted{ArchiveBest$best(m = NULL)}\if{html}{\out{
}} -} - -\subsection{Arguments}{ -\if{html}{\out{
}} -\describe{ -\item{\code{m}}{(\code{integer()})\cr -ignored.} -} -\if{html}{\out{
}} -} -\subsection{Returns}{ -\code{\link[data.table:data.table]{data.table::data.table()}} -} -} -\if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-ArchiveBest-clone}{}}} -\subsection{Method \code{clone()}}{ -The objects of this class are cloneable with this method. -\subsection{Usage}{ -\if{html}{\out{
}}\preformatted{ArchiveBest$clone(deep = FALSE)}\if{html}{\out{
}} -} - -\subsection{Arguments}{ -\if{html}{\out{
}} -\describe{ -\item{\code{deep}}{Whether to make a deep clone.} -} -\if{html}{\out{
}} -} -} -} diff --git a/man/CallbackAsync.Rd b/man/CallbackAsync.Rd new file mode 100644 index 000000000..32b261f52 --- /dev/null +++ b/man/CallbackAsync.Rd @@ -0,0 +1,75 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/CallbackAsync.R +\name{CallbackAsync} +\alias{CallbackAsync} +\title{Create Asynchronous Optimization Callback} +\description{ +Specialized \link[mlr3misc:Callback]{mlr3misc::Callback} for asynchronous optimization. +Callbacks allow customizing the behavior of processes in bbotk. +The \code{\link[=callback_async]{callback_async()}} function creates a \link{CallbackAsync}. +Predefined callbacks are stored in the \link[mlr3misc:Dictionary]{dictionary} \link{mlr_callbacks} and can be retrieved with \code{\link[=clbk]{clbk()}}. +For more information on optimization callbacks see \code{\link[=callback_async]{callback_async()}}. +} +\section{Super class}{ +\code{\link[mlr3misc:Callback]{mlr3misc::Callback}} -> \code{CallbackAsync} +} +\section{Public fields}{ +\if{html}{\out{
}} +\describe{ +\item{\code{on_optimization_begin}}{(\verb{function()})\cr +Stage called at the beginning of the optimization in the main process. +Called in \code{Optimizer$optimize()}.} + +\item{\code{on_worker_begin}}{(\verb{function()})\cr +Stage called at the beginning of the optimization on the worker. +Called in the worker loop.} + +\item{\code{on_worker_end}}{(\verb{function()})\cr +Stage called at the end of the optimization on the worker. +Called in the worker loop.} + +\item{\code{on_result}}{(\verb{function()})\cr +Stage called after the result is written. +Called in \code{OptimInstance$assign_result()}.} + +\item{\code{on_optimization_end}}{(\verb{function()})\cr +Stage called at the end of the optimization in the main process. +Called in \code{Optimizer$optimize()}.} +} +\if{html}{\out{
}} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-CallbackAsync-clone}{\code{CallbackAsync$clone()}} +} +} +\if{html}{\out{ +
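A hedged sketch of constructing such a callback, assuming that \code{callback_async()} mirrors the \code{callback_batch()} constructor shown later in this patch; the id "bbotk.log_worker" is made up.
\preformatted{# log a message when a worker starts and after the result is written
callback = callback_async("bbotk.log_worker",
  on_worker_begin = function(callback, context) {
    lgr::get_logger("bbotk")$info("worker begins optimizing")
  },
  on_result = function(callback, context) {
    lgr::get_logger("bbotk")$info("result written to the instance")
  }
)
}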
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-CallbackAsync-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{CallbackAsync$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/CallbackOptimization.Rd b/man/CallbackBatch.Rd similarity index 79% rename from man/CallbackOptimization.Rd rename to man/CallbackBatch.Rd index 5f717609d..5f414ec22 100644 --- a/man/CallbackOptimization.Rd +++ b/man/CallbackBatch.Rd @@ -1,25 +1,25 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CallbackOptimization.R -\name{CallbackOptimization} -\alias{CallbackOptimization} -\title{Create Optimization Callback} +% Please edit documentation in R/CallbackBatch.R +\name{CallbackBatch} +\alias{CallbackBatch} +\title{Create Batch Optimization Callback} \description{ -Specialized \link[mlr3misc:Callback]{mlr3misc::Callback} for optimization. +Specialized \link[mlr3misc:Callback]{mlr3misc::Callback} for batch optimization. Callbacks allow to customize the behavior of processes in bbotk. -The \code{\link[=callback_optimization]{callback_optimization()}} function creates a \link{CallbackOptimization}. +The \code{\link[=callback_batch]{callback_batch()}} function creates a \link{CallbackBatch}. Predefined callbacks are stored in the \link[mlr3misc:Dictionary]{dictionary} \link{mlr_callbacks} and can be retrieved with \code{\link[=clbk]{clbk()}}. -For more information on optimization callbacks see \code{\link[=callback_optimization]{callback_optimization()}}. +For more information on optimization callbacks see \code{\link[=callback_batch]{callback_batch()}}. } \examples{ # write archive to disk -callback_optimization("bbotk.backup", +callback_batch("bbotk.backup", on_optimization_end = function(callback, context) { saveRDS(context$instance$archive, "archive.rds") } ) } \section{Super class}{ -\code{\link[mlr3misc:Callback]{mlr3misc::Callback}} -> \code{CallbackOptimization} +\code{\link[mlr3misc:Callback]{mlr3misc::Callback}} -> \code{CallbackBatch} } \section{Public fields}{ \if{html}{\out{
}} @@ -49,7 +49,7 @@ Called in \code{Optimizer$optimize()}.} \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-CallbackOptimization-clone}{\code{CallbackOptimization$clone()}} +\item \href{#method-CallbackBatch-clone}{\code{CallbackBatch$clone()}} } } \if{html}{\out{ @@ -64,12 +64,12 @@ Called in \code{Optimizer$optimize()}.} }} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-CallbackOptimization-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-CallbackBatch-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{CallbackOptimization$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{CallbackBatch$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/ContextAsync.Rd b/man/ContextAsync.Rd new file mode 100644 index 000000000..66497e9ef --- /dev/null +++ b/man/ContextAsync.Rd @@ -0,0 +1,85 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ContextAsync.R +\name{ContextAsync} +\alias{ContextAsync} +\title{Asynchronous Optimization Context} +\description{ +A \link{CallbackAsync} accesses and modifies data during the optimization via the \code{ContextAsync}. +See the section on active bindings for a list of modifiable objects. +See \code{\link[=callback_async]{callback_async()}} for a list of stages which access \code{ContextAsync}. +} +\details{ +Changes to \verb{$instance} and \verb{$optimizer} in the stages executed on the workers are not reflected in the main process. +} +\section{Super class}{ +\code{\link[mlr3misc:Context]{mlr3misc::Context}} -> \code{ContextAsync} +} +\section{Public fields}{ +\if{html}{\out{
}} +\describe{ +\item{\code{instance}}{(\link{OptimInstance}).} + +\item{\code{optimizer}}{(\link{Optimizer}).} +} +\if{html}{\out{
}} +} +\section{Active bindings}{ +\if{html}{\out{
}} +\describe{ +\item{\code{result}}{(\link[data.table:data.table]{data.table::data.table})\cr +The result of the optimization.} +} +\if{html}{\out{
}} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-ContextAsync-new}{\code{ContextAsync$new()}} +\item \href{#method-ContextAsync-clone}{\code{ContextAsync$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ContextAsync-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ContextAsync$new(inst, optimizer)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{inst}}{(\link{OptimInstance}).} + +\item{\code{optimizer}}{(\link{Optimizer}).} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ContextAsync-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{ContextAsync$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/ContextOptimization.Rd b/man/ContextBatch.Rd similarity index 62% rename from man/ContextOptimization.Rd rename to man/ContextBatch.Rd index 24aea99eb..303ccf052 100644 --- a/man/ContextOptimization.Rd +++ b/man/ContextBatch.Rd @@ -1,15 +1,15 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ContextOptimization.R -\name{ContextOptimization} -\alias{ContextOptimization} -\title{Optimization Context} +% Please edit documentation in R/ContextBatch.R +\name{ContextBatch} +\alias{ContextBatch} +\title{Batch Optimization Context} \description{ -The \link{ContextOptimization} allows \link[mlr3misc:Callback]{mlr3misc::Callback}s to access and modify data while optimization. -See section on active bindings for a list of modifiable objects. -See \code{\link[=callback_optimization]{callback_optimization()}} for a list of stages which access \link{ContextOptimization}. +A \link{CallbackBatch} accesses and modifies data during the optimization via the \code{ContextBatch}. +See the section on active bindings for a list of modifiable objects. +See \code{\link[=callback_batch]{callback_batch()}} for a list of stages which access \code{ContextBatch}. } \section{Super class}{ -\code{\link[mlr3misc:Context]{mlr3misc::Context}} -> \code{ContextOptimization} +\code{\link[mlr3misc:Context]{mlr3misc::Context}} -> \code{ContextBatch} } \section{Public fields}{ \if{html}{\out{
}} @@ -35,8 +35,8 @@ The result of the optimization.} \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-ContextOptimization-new}{\code{ContextOptimization$new()}} -\item \href{#method-ContextOptimization-clone}{\code{ContextOptimization$clone()}} +\item \href{#method-ContextBatch-new}{\code{ContextBatch$new()}} +\item \href{#method-ContextBatch-clone}{\code{ContextBatch$clone()}} } } \if{html}{\out{ @@ -48,18 +48,18 @@ The result of the optimization.} }} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-ContextOptimization-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ContextBatch-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{ContextOptimization$new(instance, optimizer)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{ContextBatch$new(inst, optimizer)}\if{html}{\out{
}} } \subsection{Arguments}{ \if{html}{\out{
}} \describe{ -\item{\code{instance}}{(\link{OptimInstance}).} +\item{\code{inst}}{(\link{OptimInstance}).} \item{\code{optimizer}}{(\link{Optimizer}).} } @@ -67,12 +67,12 @@ Creates a new instance of this \link[R6:R6Class]{R6} class. } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-ContextOptimization-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-ContextBatch-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{ContextOptimization$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{ContextBatch$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/Objective.Rd b/man/Objective.Rd index 02d3123db..2239625d6 100644 --- a/man/Objective.Rd +++ b/man/Objective.Rd @@ -2,20 +2,22 @@ % Please edit documentation in R/Objective.R \name{Objective} \alias{Objective} -\title{Objective function with domain and co-domain} +\title{Objective Function with Domain and Codomain} \description{ -Describes a black-box objective function that maps an arbitrary domain to a -numerical codomain. +The \code{Objective} class describes a black-box objective function that maps an arbitrary domain to a numerical codomain. } -\section{Technical details}{ - -\code{Objective} objects can have the following properties: \code{"noisy"}, -\code{"deterministic"}, \code{"single-crit"} and \code{"multi-crit"}. +\details{ +\code{Objective} objects can have the following properties: \code{"noisy"}, \code{"deterministic"}, \code{"single-crit"} and \code{"multi-crit"}. } - \section{Public fields}{ \if{html}{\out{
}} \describe{ +\item{\code{callbacks}}{(list of \link[mlr3misc:Callback]{mlr3misc::Callback})\cr +Callbacks applied during the optimization.} + +\item{\code{context}}{(\link{Context})\cr +Stores the context for the callbacks.} + \item{\code{id}}{(\code{character(1)})).} \item{\code{properties}}{(\code{character()}).} @@ -96,8 +98,7 @@ be minimized or maximized. The default is to minimize each component.} Changeable constants or parameters that are not subject to tuning can be stored and accessed here.} \item{\code{check_values}}{(\code{logical(1)})\cr -Should points before the evaluation and the results be checked for -validity?} +Should points before the evaluation and the results be checked for validity?} } \if{html}{\out{
}} } diff --git a/man/ObjectiveRFun.Rd b/man/ObjectiveRFun.Rd index ed6f8eb42..861e1fbe6 100644 --- a/man/ObjectiveRFun.Rd +++ b/man/ObjectiveRFun.Rd @@ -102,8 +102,7 @@ be minimized or maximized. The default is to minimize each component.} Changeable constants or parameters that are not subject to tuning can be stored and accessed here.} \item{\code{check_values}}{(\code{logical(1)})\cr -Should points before the evaluation and the results be checked for -validity?} +Should points before the evaluation and the results be checked for validity?} } \if{html}{\out{
}} } diff --git a/man/ObjectiveRFunDt.Rd b/man/ObjectiveRFunDt.Rd index 09f835a1d..ca3faaaa3 100644 --- a/man/ObjectiveRFunDt.Rd +++ b/man/ObjectiveRFunDt.Rd @@ -77,8 +77,7 @@ be minimized or maximized. The default is to minimize each component.} Changeable constants or parameters that are not subject to tuning can be stored and accessed here.} \item{\code{check_values}}{(\code{logical(1)})\cr -Should points before the evaluation and the results be checked for -validity?} +Should points before the evaluation and the results be checked for validity?} } \if{html}{\out{
}} } diff --git a/man/ObjectiveRFunMany.Rd b/man/ObjectiveRFunMany.Rd index a8eaca30f..23ed5d1ee 100644 --- a/man/ObjectiveRFunMany.Rd +++ b/man/ObjectiveRFunMany.Rd @@ -102,8 +102,7 @@ be minimized or maximized. The default is to minimize each component.} Changeable constants or parameters that are not subject to tuning can be stored and accessed here.} \item{\code{check_values}}{(\code{logical(1)})\cr -Should points before the evaluation and the results be checked for -validity?} +Should points before the evaluation and the results be checked for validity?} } \if{html}{\out{
}} } diff --git a/man/OptimInstance.Rd b/man/OptimInstance.Rd index 26f1f44de..b8268c00b 100644 --- a/man/OptimInstance.Rd +++ b/man/OptimInstance.Rd @@ -2,36 +2,33 @@ % Please edit documentation in R/OptimInstance.R \name{OptimInstance} \alias{OptimInstance} -\title{Optimization Instance with budget and archive} +\title{Optimization Instance} \description{ -Abstract base class. +The \code{OptimInstance} specifies an optimization problem for an \link{Optimizer}. } -\section{Technical details}{ - -The \link{Optimizer} writes the final result to the \code{.result} field by using -the \verb{$assign_result()} method. \code{.result} stores a \link[data.table:data.table]{data.table::data.table} -consisting of x values in the \emph{search space}, (transformed) x values in the -\emph{domain space} and y values in the \emph{codomain space} of the \link{Objective}. The -user can access the results with active bindings (see below). +\details{ +\code{OptimInstance} is an abstract base class that implements the base functionality each instance must provide. +The \link{Optimizer} writes the final result to the \code{.result} field by using the \verb{$assign_result()} method. +\code{.result} stores a \link[data.table:data.table]{data.table::data.table} consisting of x values in the \emph{search space}, (transformed) x values in the \emph{domain space} and y values in the \emph{codomain space} of the \link{Objective}. +The user can access the results with active bindings (see below). } - \section{Public fields}{ \if{html}{\out{
}} \describe{ -\item{\code{objective}}{(\link{Objective}).} +\item{\code{objective}}{(\link{Objective})\cr +Objective function of the instance.} -\item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet}).} +\item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specification of the search space for the \link{Optimizer}.} -\item{\code{terminator}}{(\link{Terminator}).} +\item{\code{terminator}}{\link{Terminator}\cr +Termination criterion of the optimization.} -\item{\code{archive}}{(\link{Archive}).} +\item{\code{archive}}{(\link{Archive})\cr +Contains all performed function calls of the Objective.} \item{\code{progressor}}{(\code{progressor()})\cr Stores \code{progressor} function.} - -\item{\code{objective_multiplicator}}{(\code{integer()}).} - -\item{\code{callbacks}}{(List of \link{CallbackOptimization}s).} } \if{html}{\out{
}} } @@ -44,12 +41,6 @@ Get result} \item{\code{result_x_search_space}}{(\link[data.table:data.table]{data.table::data.table})\cr x part of the result in the \emph{search space}.} -\item{\code{result_x_domain}}{(\code{list()})\cr -(transformed) x part of the result in the \emph{domain space} of the objective.} - -\item{\code{result_y}}{(\code{numeric()})\cr -Optimal outcome.} - \item{\code{is_terminated}}{(\code{logical(1)}).} } \if{html}{\out{
}} @@ -60,9 +51,7 @@ Optimal outcome.} \item \href{#method-OptimInstance-new}{\code{OptimInstance$new()}} \item \href{#method-OptimInstance-format}{\code{OptimInstance$format()}} \item \href{#method-OptimInstance-print}{\code{OptimInstance$print()}} -\item \href{#method-OptimInstance-eval_batch}{\code{OptimInstance$eval_batch()}} \item \href{#method-OptimInstance-assign_result}{\code{OptimInstance$assign_result()}} -\item \href{#method-OptimInstance-objective_function}{\code{OptimInstance$objective_function()}} \item \href{#method-OptimInstance-clear}{\code{OptimInstance$clear()}} \item \href{#method-OptimInstance-clone}{\code{OptimInstance$clone()}} } @@ -77,16 +66,17 @@ Creates a new instance of this \link[R6:R6Class]{R6} class. objective, search_space = NULL, terminator, - keep_evals = "all", check_values = TRUE, - callbacks = list() + callbacks = NULL, + archive = NULL )}\if{html}{\out{}} } \subsection{Arguments}{ \if{html}{\out{
}} \describe{ -\item{\code{objective}}{(\link{Objective}).} +\item{\code{objective}}{(\link{Objective})\cr +Objective function.} \item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} @@ -95,17 +85,16 @@ a set of parameters together with a \code{trafo} function that transforms values from the search space to values of the domain. Depending on the context, this value defaults to the domain of the objective.} -\item{\code{terminator}}{(\link{Terminator}).} - -\item{\code{keep_evals}}{(\code{character(1)})\cr -Keep \code{all} or only \code{best} evaluations in archive?} +\item{\code{terminator}}{\link{Terminator}\cr +Termination criterion.} \item{\code{check_values}}{(\code{logical(1)})\cr -Should x-values that are added to the archive be checked for validity? -Search space that is logged into archive.} +Should points before the evaluation and the results be checked for validity?} \item{\code{callbacks}}{(list of \link[mlr3misc:Callback]{mlr3misc::Callback})\cr List of callbacks.} + +\item{\code{archive}}{(\link{Archive}).} } \if{html}{\out{
}} } @@ -145,37 +134,11 @@ Printer. } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimInstance-eval_batch}{}}} -\subsection{Method \code{eval_batch()}}{ -Evaluates all input values in \code{xdt} by calling -the \link{Objective}. Applies possible transformations to the input values -and writes the results to the \link{Archive}. - -Before each batch-evaluation, the \link{Terminator} is checked, and if it -is positive, an exception of class \code{terminated_error} is raised. This -function should be internally called by the \link{Optimizer}. -\subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimInstance$eval_batch(xdt)}\if{html}{\out{
}} -} - -\subsection{Arguments}{ -\if{html}{\out{
}} -\describe{ -\item{\code{xdt}}{(\code{data.table::data.table()})\cr -x values as \code{data.table()} with one point per row. Contains the value in -the \emph{search space} of the \link{OptimInstance} object. Can contain additional -columns for extra information.} -} -\if{html}{\out{
}} -} -} -\if{html}{\out{
}} \if{html}{\out{}} \if{latex}{\out{\hypertarget{method-OptimInstance-assign_result}{}}} \subsection{Method \code{assign_result()}}{ -The \link{Optimizer} object writes the best found point -and estimated performance value here. For internal use. +The \link{Optimizer} object writes the best found point and estimated performance value here. +For internal use. \subsection{Usage}{ \if{html}{\out{
}}\preformatted{OptimInstance$assign_result(xdt, y)}\if{html}{\out{
}} } @@ -195,32 +158,6 @@ Optimal outcome.} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimInstance-objective_function}{}}} -\subsection{Method \code{objective_function()}}{ -Evaluates (untransformed) points of only numeric values. Returns a -numeric scalar for single-crit or a numeric vector for multi-crit. The -return value(s) are negated if the measure is maximized. Internally, -\verb{$eval_batch()} is called with a single row. This function serves as a -objective function for optimizers of numeric spaces - which should always -be minimized. -\subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimInstance$objective_function(x)}\if{html}{\out{
}} -} - -\subsection{Arguments}{ -\if{html}{\out{
}} -\describe{ -\item{\code{x}}{(\code{numeric()})\cr -Untransformed points.} -} -\if{html}{\out{
}} -} -\subsection{Returns}{ -Objective value as \code{numeric(1)}, negated for maximization problems. -} -} -\if{html}{\out{
}} \if{html}{\out{}} \if{latex}{\out{\hypertarget{method-OptimInstance-clear}{}}} \subsection{Method \code{clear()}}{ diff --git a/man/OptimInstanceAsync.Rd b/man/OptimInstanceAsync.Rd new file mode 100644 index 000000000..122fdf7bc --- /dev/null +++ b/man/OptimInstanceAsync.Rd @@ -0,0 +1,114 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimInstanceAsync.R +\name{OptimInstanceAsync} +\alias{OptimInstanceAsync} +\title{Optimization Instance for Asynchronous Optimization} +\description{ +The \code{OptimInstanceAsync} specifies an optimization problem for an \link{OptimizerAsync}. +The function \code{\link[=oi_async]{oi_async()}} creates an \link{OptimInstanceAsyncSingleCrit} or \link{OptimInstanceAsyncMultiCrit}. +} +\details{ +\code{OptimInstanceAsync} is an abstract base class that implements the base functionality each instance must provide. +} +\section{Super class}{ +\code{\link[bbotk:OptimInstance]{bbotk::OptimInstance}} -> \code{OptimInstanceAsync} +} +\section{Public fields}{ +\if{html}{\out{
}} +\describe{ +\item{\code{rush}}{(\code{Rush})\cr +Rush controller for parallel optimization.} +} +\if{html}{\out{
}} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-OptimInstanceAsync-new}{\code{OptimInstanceAsync$new()}} +\item \href{#method-OptimInstanceAsync-print}{\code{OptimInstanceAsync$print()}} +\item \href{#method-OptimInstanceAsync-clear}{\code{OptimInstanceAsync$clear()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceAsync-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceAsync$new( + objective, + search_space = NULL, + terminator, + check_values = FALSE, + callbacks = NULL, + archive = NULL, + rush = NULL +)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{objective}}{(\link{Objective})\cr +Objective function.} + +\item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} +describes either a subset of the \code{domain} of the \link{Objective} or it describes +a set of parameters together with a \code{trafo} function that transforms values +from the search space to values of the domain. Depending on the context, this +value defaults to the domain of the objective.} + +\item{\code{terminator}}{\link{Terminator}\cr +Termination criterion.} + +\item{\code{check_values}}{(\code{logical(1)})\cr +Should points before the evaluation and the results be checked for validity?} + +\item{\code{callbacks}}{(list of \link[mlr3misc:Callback]{mlr3misc::Callback})\cr +List of callbacks.} + +\item{\code{archive}}{(\link{Archive}).} + +\item{\code{rush}}{(\code{Rush})\cr +If a rush instance is supplied, the optimization runs without batches.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
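A hedged end-to-end sketch of an asynchronous run. It assumes a local Redis server is reachable, that \code{rush::rush_plan()} configures the workers, and that "async_random_search" is the dictionary key of the asynchronous random search added by this patch.
\preformatted{library(bbotk)
library(paradox)

objective = ObjectiveRFun$new(
  fun = function(xs) list(y = xs$x1^2 + xs$x2^2),
  domain = ps(x1 = p_dbl(-5, 5), x2 = p_dbl(-5, 5)),
  codomain = ps(y = p_dbl(tags = "minimize"))
)

rush::rush_plan(n_workers = 2)

instance = OptimInstanceAsyncSingleCrit$new(
  objective = objective,
  terminator = trm("evals", n_evals = 50)
)

opt("async_random_search")$optimize(instance)
instance$result
}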
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceAsync-print}{}}} +\subsection{Method \code{print()}}{ +Printer. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceAsync$print(...)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{...}}{(ignored).} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceAsync-clear}{}}} +\subsection{Method \code{clear()}}{ +Reset terminator and clear all evaluation results from archive and results. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceAsync$clear()}\if{html}{\out{
}} +} + +} +} diff --git a/man/OptimInstanceAsyncMultiCrit.Rd b/man/OptimInstanceAsyncMultiCrit.Rd new file mode 100644 index 000000000..4f9cbcb2f --- /dev/null +++ b/man/OptimInstanceAsyncMultiCrit.Rd @@ -0,0 +1,130 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimInstanceAsyncMultiCrit.R +\name{OptimInstanceAsyncMultiCrit} +\alias{OptimInstanceAsyncMultiCrit} +\title{Multi Criteria Optimization Instance for Asynchronous Optimization} +\description{ +The \link{OptimInstanceAsyncMultiCrit} specifies an optimization problem for an \link{OptimizerAsync}. +The function \code{\link[=oi_async]{oi_async()}} creates an \link{OptimInstanceAsyncMultiCrit}. +} +\section{Super classes}{ +\code{\link[bbotk:OptimInstance]{bbotk::OptimInstance}} -> \code{\link[bbotk:OptimInstanceAsync]{bbotk::OptimInstanceAsync}} -> \code{OptimInstanceAsyncMultiCrit} +} +\section{Active bindings}{ +\if{html}{\out{
}} +\describe{ +\item{\code{result_x_domain}}{(\code{list()})\cr +(transformed) x part of the result in the \emph{domain space} of the objective.} + +\item{\code{result_y}}{(\code{numeric(1)})\cr +Optimal outcome.} +} +\if{html}{\out{
}} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-OptimInstanceAsyncMultiCrit-new}{\code{OptimInstanceAsyncMultiCrit$new()}} +\item \href{#method-OptimInstanceAsyncMultiCrit-assign_result}{\code{OptimInstanceAsyncMultiCrit$assign_result()}} +\item \href{#method-OptimInstanceAsyncMultiCrit-clone}{\code{OptimInstanceAsyncMultiCrit$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceAsyncMultiCrit-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceAsyncMultiCrit$new( + objective, + search_space = NULL, + terminator, + check_values = FALSE, + callbacks = NULL, + archive = NULL, + rush = NULL +)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{objective}}{(\link{Objective})\cr +Objective function.} + +\item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} +describes either a subset of the \code{domain} of the \link{Objective} or it describes +a set of parameters together with a \code{trafo} function that transforms values +from the search space to values of the domain. Depending on the context, this +value defaults to the domain of the objective.} + +\item{\code{terminator}}{\link{Terminator}\cr +Termination criterion.} + +\item{\code{check_values}}{(\code{logical(1)})\cr +Should points before the evaluation and the results be checked for validity?} + +\item{\code{callbacks}}{(list of \link[mlr3misc:Callback]{mlr3misc::Callback})\cr +List of callbacks.} + +\item{\code{archive}}{(\link{Archive}).} + +\item{\code{rush}}{(\code{Rush})\cr +If a rush instance is supplied, the optimization runs without batches.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceAsyncMultiCrit-assign_result}{}}} +\subsection{Method \code{assign_result()}}{ +The \link{OptimizerAsync} writes the best found points and estimated performance values here (probably the Pareto set / front). +For internal use. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceAsyncMultiCrit$assign_result(xdt, ydt)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{xdt}}{(\code{\link[data.table:data.table]{data.table::data.table()}})\cr +Set of untransformed points / points from the \emph{search space}. +One point per row, e.g. \code{data.table(x1 = c(1, 3), x2 = c(2, 4))}. +Column names have to match ids of the \code{search_space}. +However, \code{xdt} can contain additional columns.} + +\item{\code{ydt}}{(\code{\link[data.table:data.table]{data.table::data.table()}})\cr +Optimal outcomes, e.g. the Pareto front.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceAsyncMultiCrit-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceAsyncMultiCrit$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/OptimInstanceAsyncSingleCrit.Rd b/man/OptimInstanceAsyncSingleCrit.Rd new file mode 100644 index 000000000..4a18e520c --- /dev/null +++ b/man/OptimInstanceAsyncSingleCrit.Rd @@ -0,0 +1,130 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimInstanceAsyncSingleCrit.R +\name{OptimInstanceAsyncSingleCrit} +\alias{OptimInstanceAsyncSingleCrit} +\title{Single Criterion Optimization Instance for Asynchronous Optimization} +\description{ +The \code{OptimInstanceAsyncSingleCrit} specifies an optimization problem for an \link{OptimizerAsync}. +The function \code{\link[=oi_async]{oi_async()}} creates an \link{OptimInstanceAsyncSingleCrit}. +} +\section{Super classes}{ +\code{\link[bbotk:OptimInstance]{bbotk::OptimInstance}} -> \code{\link[bbotk:OptimInstanceAsync]{bbotk::OptimInstanceAsync}} -> \code{OptimInstanceAsyncSingleCrit} +} +\section{Active bindings}{ +\if{html}{\out{
}} +\describe{ +\item{\code{result_x_domain}}{(\code{list()})\cr +(transformed) x part of the result in the \emph{domain space} of the objective.} + +\item{\code{result_y}}{(\code{numeric()})\cr +Optimal outcome.} +} +\if{html}{\out{
}} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-OptimInstanceAsyncSingleCrit-new}{\code{OptimInstanceAsyncSingleCrit$new()}} +\item \href{#method-OptimInstanceAsyncSingleCrit-assign_result}{\code{OptimInstanceAsyncSingleCrit$assign_result()}} +\item \href{#method-OptimInstanceAsyncSingleCrit-clone}{\code{OptimInstanceAsyncSingleCrit$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceAsyncSingleCrit-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceAsyncSingleCrit$new( + objective, + search_space = NULL, + terminator, + check_values = FALSE, + callbacks = NULL, + archive = NULL, + rush = NULL +)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{objective}}{(\link{Objective})\cr +Objective function.} + +\item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} +describes either a subset of the \code{domain} of the \link{Objective} or it describes +a set of parameters together with a \code{trafo} function that transforms values +from the search space to values of the domain. Depending on the context, this +value defaults to the domain of the objective.} + +\item{\code{terminator}}{\link{Terminator}\cr +Termination criterion.} + +\item{\code{check_values}}{(\code{logical(1)})\cr +Should points before the evaluation and the results be checked for validity?} + +\item{\code{callbacks}}{(list of \link[mlr3misc:Callback]{mlr3misc::Callback})\cr +List of callbacks.} + +\item{\code{archive}}{(\link{Archive}).} + +\item{\code{rush}}{(\code{Rush})\cr +If a rush instance is supplied, the optimization runs without batches.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceAsyncSingleCrit-assign_result}{}}} +\subsection{Method \code{assign_result()}}{ +The \link{OptimizerAsync} object writes the best found point and estimated performance value here. +For internal use. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceAsyncSingleCrit$assign_result(xdt, y)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{xdt}}{(\code{\link[data.table:data.table]{data.table::data.table()}})\cr +Set of untransformed points / points from the \emph{search space}. +One point per row, e.g. \code{data.table(x1 = c(1, 3), x2 = c(2, 4))}. +Column names have to match ids of the \code{search_space}. +However, \code{xdt} can contain additional columns.} + +\item{\code{y}}{(\code{numeric(1)})\cr +Optimal outcome.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceAsyncSingleCrit-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceAsyncSingleCrit$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/OptimInstanceBatch.Rd b/man/OptimInstanceBatch.Rd new file mode 100644 index 000000000..ac3592041 --- /dev/null +++ b/man/OptimInstanceBatch.Rd @@ -0,0 +1,170 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimInstanceBatch.R +\name{OptimInstanceBatch} +\alias{OptimInstanceBatch} +\title{Optimization Instance for Batch Optimization} +\description{ +The \code{OptimInstanceBatch} specifies an optimization problem for an \link{OptimizerBatch}. +The function \code{\link[=oi]{oi()}} creates an \link{OptimInstanceBatchSingleCrit} or \link{OptimInstanceBatchMultiCrit}. +} +\section{Super class}{ +\code{\link[bbotk:OptimInstance]{bbotk::OptimInstance}} -> \code{OptimInstanceBatch} +} +\section{Public fields}{ +\if{html}{\out{
}} +\describe{ +\item{\code{objective_multiplicator}}{(\code{integer()}).} +} +\if{html}{\out{
}} +} +\section{Active bindings}{ +\if{html}{\out{
}} +\describe{ +\item{\code{result}}{(\link[data.table:data.table]{data.table::data.table})\cr +Get result} + +\item{\code{result_x_search_space}}{(\link[data.table:data.table]{data.table::data.table})\cr +x part of the result in the \emph{search space}.} + +\item{\code{result_x_domain}}{(\code{list()})\cr +(transformed) x part of the result in the \emph{domain space} of the objective.} + +\item{\code{result_y}}{(\code{numeric()})\cr +Optimal outcome.} + +\item{\code{is_terminated}}{(\code{logical(1)}).} +} +\if{html}{\out{
}} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-OptimInstanceBatch-new}{\code{OptimInstanceBatch$new()}} +\item \href{#method-OptimInstanceBatch-eval_batch}{\code{OptimInstanceBatch$eval_batch()}} +\item \href{#method-OptimInstanceBatch-objective_function}{\code{OptimInstanceBatch$objective_function()}} +\item \href{#method-OptimInstanceBatch-clone}{\code{OptimInstanceBatch$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceBatch-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceBatch$new( + objective, + search_space = NULL, + terminator, + check_values = TRUE, + callbacks = NULL, + archive = NULL +)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{objective}}{(\link{Objective})\cr +Objective function.} + +\item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} +describes either a subset of the \code{domain} of the \link{Objective} or it describes +a set of parameters together with a \code{trafo} function that transforms values +from the search space to values of the domain. Depending on the context, this +value defaults to the domain of the objective.} + +\item{\code{terminator}}{\link{Terminator}\cr +Termination criterion.} + +\item{\code{check_values}}{(\code{logical(1)})\cr +Should points before the evaluation and the results be checked for validity?} + +\item{\code{callbacks}}{(list of \link[mlr3misc:Callback]{mlr3misc::Callback})\cr +List of callbacks.} + +\item{\code{archive}}{(\link{Archive}).} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceBatch-eval_batch}{}}} +\subsection{Method \code{eval_batch()}}{ +Evaluates all input values in \code{xdt} by calling +the \link{Objective}. Applies possible transformations to the input values +and writes the results to the \link{Archive}. + +Before each batch-evaluation, the \link{Terminator} is checked, and if it +is positive, an exception of class \code{terminated_error} is raised. This +function should be internally called by the \link{Optimizer}. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceBatch$eval_batch(xdt)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{xdt}}{(\code{data.table::data.table()})\cr +x values as \code{data.table()} with one point per row. Contains the value in +the \emph{search space} of the \link{OptimInstance} object. Can contain additional +columns for extra information.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
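A small sketch of driving an instance manually via \code{$eval_batch()}; the objective and the evaluated points are invented for illustration.
\preformatted{library(bbotk)
library(paradox)
library(data.table)

objective = ObjectiveRFun$new(
  fun = function(xs) list(y = xs$x^2),
  domain = ps(x = p_dbl(-10, 10)),
  codomain = ps(y = p_dbl(tags = "minimize"))
)

instance = OptimInstanceBatchSingleCrit$new(
  objective = objective,
  terminator = trm("evals", n_evals = 20)
)

# evaluate one batch of three points; results are written to instance$archive
instance$eval_batch(data.table(x = c(-1, 0, 2)))
instance$archive$best()
}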
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceBatch-objective_function}{}}} +\subsection{Method \code{objective_function()}}{ +Evaluates (untransformed) points of only numeric values. Returns a +numeric scalar for single-crit or a numeric vector for multi-crit. The +return value(s) are negated if the measure is maximized. Internally, +\verb{$eval_batch()} is called with a single row. This function serves as a +objective function for optimizers of numeric spaces - which should always +be minimized. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceBatch$objective_function(x)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{x}}{(\code{numeric()})\cr +Untransformed points.} +} +\if{html}{\out{
}} +} +\subsection{Returns}{ +Objective value as \code{numeric(1)}, negated for maximization problems. +} +} +\if{html}{\out{
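Continuing the sketch above, \verb{$objective_function} can be passed directly to a numeric optimizer; this is illustrative only and assumes the terminator still has budget left, since every call is counted by the archive.
\preformatted{# minimize the wrapped objective with a base R optimizer;
# evaluations are still recorded in instance$archive
res = stats::optim(
  par = 0.5,
  fn = instance$objective_function,
  method = "Brent", lower = -10, upper = 10
)
}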
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceBatch-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimInstanceBatch$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/OptimInstanceMultiCrit.Rd b/man/OptimInstanceBatchMultiCrit.Rd similarity index 56% rename from man/OptimInstanceMultiCrit.Rd rename to man/OptimInstanceBatchMultiCrit.Rd index 94c3d0925..e60d27673 100644 --- a/man/OptimInstanceMultiCrit.Rd +++ b/man/OptimInstanceBatchMultiCrit.Rd @@ -1,22 +1,14 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimInstanceMultiCrit.R -\name{OptimInstanceMultiCrit} -\alias{OptimInstanceMultiCrit} -\title{Optimization Instance with budget and archive} +% Please edit documentation in R/OptimInstanceBatchMultiCrit.R +\name{OptimInstanceBatchMultiCrit} +\alias{OptimInstanceBatchMultiCrit} +\title{Multi Criteria Optimization Instance for Batch Optimization} \description{ -Wraps a multi-criteria \link{Objective} function with extra services for -convenient evaluation. Inherits from \link{OptimInstance}. -\itemize{ -\item Automatic storing of results in an \link{Archive} after evaluation. -\item Automatic checking for termination. Evaluations of design points are -performed in batches. Before a batch is evaluated, the \link{Terminator} is -queried for the remaining budget. If the available budget is exhausted, an -exception is raised, and no further evaluations can be performed from this -point on. -} +The \link{OptimInstanceBatchMultiCrit} specifies an optimization problem for an \link{OptimizerBatch}. +The function \code{\link[=oi]{oi()}} creates an \link{OptimInstanceBatchMultiCrit}. } -\section{Super class}{ -\code{\link[bbotk:OptimInstance]{bbotk::OptimInstance}} -> \code{OptimInstanceMultiCrit} +\section{Super classes}{ +\code{\link[bbotk:OptimInstance]{bbotk::OptimInstance}} -> \code{\link[bbotk:OptimInstanceBatch]{bbotk::OptimInstanceBatch}} -> \code{OptimInstanceBatchMultiCrit} } \section{Active bindings}{ \if{html}{\out{
}} @@ -32,42 +24,43 @@ Optimal outcome.} \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-OptimInstanceMultiCrit-new}{\code{OptimInstanceMultiCrit$new()}} -\item \href{#method-OptimInstanceMultiCrit-assign_result}{\code{OptimInstanceMultiCrit$assign_result()}} -\item \href{#method-OptimInstanceMultiCrit-clone}{\code{OptimInstanceMultiCrit$clone()}} +\item \href{#method-OptimInstanceBatchMultiCrit-new}{\code{OptimInstanceBatchMultiCrit$new()}} +\item \href{#method-OptimInstanceBatchMultiCrit-assign_result}{\code{OptimInstanceBatchMultiCrit$assign_result()}} +\item \href{#method-OptimInstanceBatchMultiCrit-clone}{\code{OptimInstanceBatchMultiCrit$clone()}} } } \if{html}{\out{
Inherited methods
}} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimInstanceMultiCrit-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceBatchMultiCrit-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimInstanceMultiCrit$new( +\if{html}{\out{
}}\preformatted{OptimInstanceBatchMultiCrit$new( objective, search_space = NULL, terminator, - keep_evals = "all", check_values = TRUE, - callbacks = list() + callbacks = NULL, + archive = NULL )}\if{html}{\out{
}} } \subsection{Arguments}{ \if{html}{\out{
}} \describe{ -\item{\code{objective}}{(\link{Objective}).} +\item{\code{objective}}{(\link{Objective})\cr +Objective function.} \item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} @@ -76,31 +69,29 @@ a set of parameters together with a \code{trafo} function that transforms values from the search space to values of the domain. Depending on the context, this value defaults to the domain of the objective.} -\item{\code{terminator}}{(\link{Terminator})\cr -Multi-criteria terminator.} - -\item{\code{keep_evals}}{(\code{character(1)})\cr -Keep \code{all} or only \code{best} evaluations in archive?} +\item{\code{terminator}}{\link{Terminator}\cr +Termination criterion.} \item{\code{check_values}}{(\code{logical(1)})\cr -Should x-values that are added to the archive be checked for validity? -Search space that is logged into archive.} +Should points before the evaluation and the results be checked for validity?} \item{\code{callbacks}}{(list of \link[mlr3misc:Callback]{mlr3misc::Callback})\cr List of callbacks.} + +\item{\code{archive}}{(\link{Archive}).} } \if{html}{\out{
}} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimInstanceMultiCrit-assign_result}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceBatchMultiCrit-assign_result}{}}} \subsection{Method \code{assign_result()}}{ The \link{Optimizer} object writes the best found points and estimated performance values here (probably the Pareto set / front). For internal use. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimInstanceMultiCrit$assign_result(xdt, ydt)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimInstanceBatchMultiCrit$assign_result(xdt, ydt)}\if{html}{\out{
}} } \subsection{Arguments}{ @@ -112,19 +103,19 @@ One point per row, e.g. \code{data.table(x1 = c(1, 3), x2 = c(2, 4))}. Column names have to match ids of the \code{search_space}. However, \code{xdt} can contain additional columns.} -\item{\code{ydt}}{(\code{numeric(1)})\cr -Optimal outcomes, e.g. the Pareto front.} +\item{\code{ydt}}{(\code{\link[data.table:data.table]{data.table::data.table()}})\cr +Optimal outcome.} } \if{html}{\out{
}} } } \if{html}{\out{
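A hedged sketch of a two-objective batch optimization; the objective is invented and "random_search" is assumed to remain the dictionary key of the batch random search after the renaming in this patch.
\preformatted{library(bbotk)
library(paradox)

objective = ObjectiveRFun$new(
  fun = function(xs) list(y1 = xs$x^2, y2 = (xs$x - 2)^2),
  domain = ps(x = p_dbl(-5, 5)),
  codomain = ps(
    y1 = p_dbl(tags = "minimize"),
    y2 = p_dbl(tags = "minimize")
  )
)

instance = OptimInstanceBatchMultiCrit$new(
  objective = objective,
  terminator = trm("evals", n_evals = 30)
)

opt("random_search", batch_size = 5)$optimize(instance)
instance$result  # Pareto set and front
}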
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimInstanceMultiCrit-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceBatchMultiCrit-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimInstanceMultiCrit$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimInstanceBatchMultiCrit$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/OptimInstanceSingleCrit.Rd b/man/OptimInstanceBatchSingleCrit.Rd similarity index 54% rename from man/OptimInstanceSingleCrit.Rd rename to man/OptimInstanceBatchSingleCrit.Rd index 0d0dcff70..1d0dea269 100644 --- a/man/OptimInstanceSingleCrit.Rd +++ b/man/OptimInstanceBatchSingleCrit.Rd @@ -1,62 +1,55 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimInstanceSingleCrit.R -\name{OptimInstanceSingleCrit} -\alias{OptimInstanceSingleCrit} -\title{Optimization Instance with budget and archive} +% Please edit documentation in R/OptimInstanceBatchSingleCrit.R +\name{OptimInstanceBatchSingleCrit} +\alias{OptimInstanceBatchSingleCrit} +\title{Single Criterion Optimization Instance for Batch Optimization} \description{ -Wraps a single-criteria \link{Objective} function with extra services for -convenient evaluation. Inherits from \link{OptimInstance}. -\itemize{ -\item Automatic storing of results in an \link{Archive} after evaluation. -\item Automatic checking for termination. Evaluations of design points are -performed in batches. Before a batch is evaluated, the \link{Terminator} is -queried for the remaining budget. If the available budget is exhausted, an -exception is raised, and no further evaluations can be performed from this -point on. -} +The \link{OptimInstanceBatchSingleCrit} specifies an optimization problem for an \link{OptimizerBatch}. +The function \code{\link[=oi]{oi()}} creates an \link{OptimInstanceBatchSingleCrit}. } -\section{Super class}{ -\code{\link[bbotk:OptimInstance]{bbotk::OptimInstance}} -> \code{OptimInstanceSingleCrit} +\section{Super classes}{ +\code{\link[bbotk:OptimInstance]{bbotk::OptimInstance}} -> \code{\link[bbotk:OptimInstanceBatch]{bbotk::OptimInstanceBatch}} -> \code{OptimInstanceBatchSingleCrit} } \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-OptimInstanceSingleCrit-new}{\code{OptimInstanceSingleCrit$new()}} -\item \href{#method-OptimInstanceSingleCrit-assign_result}{\code{OptimInstanceSingleCrit$assign_result()}} -\item \href{#method-OptimInstanceSingleCrit-clone}{\code{OptimInstanceSingleCrit$clone()}} +\item \href{#method-OptimInstanceBatchSingleCrit-new}{\code{OptimInstanceBatchSingleCrit$new()}} +\item \href{#method-OptimInstanceBatchSingleCrit-assign_result}{\code{OptimInstanceBatchSingleCrit$assign_result()}} +\item \href{#method-OptimInstanceBatchSingleCrit-clone}{\code{OptimInstanceBatchSingleCrit$clone()}} } } \if{html}{\out{
Inherited methods
}} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimInstanceSingleCrit-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceBatchSingleCrit-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimInstanceSingleCrit$new( +\if{html}{\out{
}}\preformatted{OptimInstanceBatchSingleCrit$new( objective, search_space = NULL, terminator, - keep_evals = "all", check_values = TRUE, - callbacks = list() + callbacks = NULL, + archive = NULL )}\if{html}{\out{
}} } \subsection{Arguments}{ \if{html}{\out{
}} \describe{ -\item{\code{objective}}{(\link{Objective}).} +\item{\code{objective}}{(\link{Objective})\cr +Objective function.} \item{\code{search_space}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} @@ -65,29 +58,28 @@ a set of parameters together with a \code{trafo} function that transforms values from the search space to values of the domain. Depending on the context, this value defaults to the domain of the objective.} -\item{\code{terminator}}{(\link{Terminator}).} - -\item{\code{keep_evals}}{(\code{character(1)})\cr -Keep \code{all} or only \code{best} evaluations in archive?} +\item{\code{terminator}}{\link{Terminator}\cr +Termination criterion.} \item{\code{check_values}}{(\code{logical(1)})\cr -Should x-values that are added to the archive be checked for validity? -Search space that is logged into archive.} +Should points before the evaluation and the results be checked for validity?} \item{\code{callbacks}}{(list of \link[mlr3misc:Callback]{mlr3misc::Callback})\cr List of callbacks.} + +\item{\code{archive}}{(\link{Archive}).} } \if{html}{\out{
}} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimInstanceSingleCrit-assign_result}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceBatchSingleCrit-assign_result}{}}} \subsection{Method \code{assign_result()}}{ The \link{Optimizer} object writes the best found point and estimated performance value here. For internal use. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimInstanceSingleCrit$assign_result(xdt, y)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimInstanceBatchSingleCrit$assign_result(xdt, y)}\if{html}{\out{
}} } \subsection{Arguments}{ @@ -106,12 +98,12 @@ Optimal outcome.} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimInstanceSingleCrit-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimInstanceBatchSingleCrit-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimInstanceSingleCrit$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimInstanceBatchSingleCrit$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/Optimizer.Rd b/man/Optimizer.Rd index 6cb0d53a1..865d03dc4 100644 --- a/man/Optimizer.Rd +++ b/man/Optimizer.Rd @@ -4,7 +4,10 @@ \alias{Optimizer} \title{Optimizer} \description{ -Abstract \code{Optimizer} class that implements the base functionality each \code{Optimizer} subclass must provide. +The \code{Optimizer} implements the optimization algorithm. +} +\details{ +\code{Optimizer} is an abstract base class that implements the base functionality each optimizer must provide. A \code{Optimizer} object describes the optimization strategy. A \code{Optimizer} object must write its result to the \verb{$assign_result()} method of the \link{OptimInstance} at the end in order to store the best point and its estimated performance vector. } @@ -59,7 +62,6 @@ A warning is signaled by the constructor if at least one of the packages is not \item \href{#method-Optimizer-format}{\code{Optimizer$format()}} \item \href{#method-Optimizer-print}{\code{Optimizer$print()}} \item \href{#method-Optimizer-help}{\code{Optimizer$help()}} -\item \href{#method-Optimizer-optimize}{\code{Optimizer$optimize()}} \item \href{#method-Optimizer-clone}{\code{Optimizer$clone()}} } } @@ -150,28 +152,6 @@ Opens the corresponding help page referenced by field \verb{$man}. \if{html}{\out{
}}\preformatted{Optimizer$help()}\if{html}{\out{
}} } -} -\if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-Optimizer-optimize}{}}} -\subsection{Method \code{optimize()}}{ -Performs the optimization and writes optimization result into -\link{OptimInstance}. The optimization result is returned but the complete -optimization path is stored in \link{Archive} of \link{OptimInstance}. -\subsection{Usage}{ -\if{html}{\out{
}}\preformatted{Optimizer$optimize(inst)}\if{html}{\out{
}} -} - -\subsection{Arguments}{ -\if{html}{\out{
}} -\describe{ -\item{\code{inst}}{(\link{OptimInstance}).} -} -\if{html}{\out{
}} -} -\subsection{Returns}{ -\link[data.table:data.table]{data.table::data.table}. -} } \if{html}{\out{
}} \if{html}{\out{}} diff --git a/man/OptimizerAsync.Rd b/man/OptimizerAsync.Rd new file mode 100644 index 000000000..aad98fe1f --- /dev/null +++ b/man/OptimizerAsync.Rd @@ -0,0 +1,78 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimizerAsync.R +\name{OptimizerAsync} +\alias{OptimizerAsync} +\title{Asynchronous Optimizer} +\description{ +The \link{OptimizerAsync} implements the asynchronous optimization algorithm. +The optimization is performed asynchronously on a set of workers. +} +\details{ +\link{OptimizerAsync} is the abstract base class for all asynchronous optimizers. +It provides the basic structure for asynchronous optimization algorithms. +The public method \verb{$optimize()} is the main entry point for the optimization and runs in the main process. +The method starts the optimization process by starting the workers and pushing the necessary objects to the workers. +Optionally, a set of points can be created, e.g. an initial design, and pushed to the workers. +The private method \verb{$.optimize()} is the actual optimization algorithm that runs on the workers. +Usually, the method proposes new points, evaluates them, and updates the archive. +} +\section{Super class}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{OptimizerAsync} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-OptimizerAsync-optimize}{\code{OptimizerAsync$optimize()}} +\item \href{#method-OptimizerAsync-clone}{\code{OptimizerAsync$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsync-optimize}{}}} +\subsection{Method \code{optimize()}}{ +Performs the optimization on an \link{OptimInstanceAsyncSingleCrit} or \link{OptimInstanceAsyncMultiCrit} until termination. +The single evaluations are written to the \link{ArchiveAsync}. +The result is written to the instance object. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsync$optimize(inst)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{inst}}{(\link{OptimInstanceAsyncSingleCrit} | \link{OptimInstanceAsyncMultiCrit}).} +} +\if{html}{\out{
}} +} +\subsection{Returns}{ +\code{\link[data.table:data.table]{data.table::data.table()}} +} +} +\if{html}{\out{
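To make the workflow described above concrete, here is a minimal sketch of an asynchronous run (not part of this patch): it assumes a running Redis server and that local workers are registered via \code{rush::rush_plan()}; the objective, search space and termination budget are placeholders.

\preformatted{# sketch: asynchronous random search, assuming a running Redis server
library(bbotk)
rush::rush_plan(n_workers = 2)  # assumed rush helper that registers local workers

objective = ObjectiveRFun$new(
  fun = function(xs) list(y = xs$x^2),
  domain = ps(x = p_dbl(lower = -5, upper = 5)),
  codomain = ps(y = p_dbl(tags = "minimize")))

instance = oi_async(
  objective = objective,
  terminator = trm("evals", n_evals = 20))

optimizer = opt("async_random_search")
optimizer$optimize(instance)  # blocks until termination; result is stored in the instance
}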
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsync-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsync$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/OptimizerBatch.Rd b/man/OptimizerBatch.Rd new file mode 100644 index 000000000..a8e7dbc8d --- /dev/null +++ b/man/OptimizerBatch.Rd @@ -0,0 +1,77 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimizerBatch.R +\name{OptimizerBatch} +\alias{OptimizerBatch} +\title{Batch Optimizer} +\description{ +Abstract \code{OptimizerBatch} class that implements the base functionality each \code{OptimizerBatch} subclass must provide. +A \code{OptimizerBatch} object describes the optimization strategy. +A \code{OptimizerBatch} object must write its result to the \verb{$assign_result()} method of the \link{OptimInstance} at the end in order to store the best point and its estimated performance vector. +} +\section{Progress Bars}{ + +\verb{$optimize()} supports progress bars via the package \CRANpkg{progressr} +combined with a \link{Terminator}. Simply wrap the function in +\code{progressr::with_progress()} to enable them. We recommend to use package +\CRANpkg{progress} as backend; enable with \code{progressr::handlers("progress")}. +} + +\section{Super class}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{OptimizerBatch} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-OptimizerBatch-optimize}{\code{OptimizerBatch$optimize()}} +\item \href{#method-OptimizerBatch-clone}{\code{OptimizerBatch$clone()}} +} +} +\if{html}{\out{ +
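To illustrate the Progress Bars section above, a batch run can be wrapped in \CRANpkg{progressr} as follows; this is a sketch using the new \code{oi()} helper with a placeholder objective, not an example taken from the package.

\preformatted{# sketch: progress bars for a batch optimizer run
library(bbotk)
progressr::handlers("progress")

instance = oi(
  objective = ObjectiveRFun$new(
    fun = function(xs) list(y = xs$x^2),
    domain = ps(x = p_dbl(lower = -5, upper = 5)),
    codomain = ps(y = p_dbl(tags = "minimize"))),
  terminator = trm("evals", n_evals = 20))

progressr::with_progress(
  opt("random_search")$optimize(instance))
}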
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatch-optimize}{}}} +\subsection{Method \code{optimize()}}{ +Performs the optimization and writes optimization result into \link{OptimInstanceBatch}. +The optimization result is returned but the complete optimization path is stored in \link{ArchiveBatch} of \link{OptimInstanceBatch}. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerBatch$optimize(inst)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{inst}}{(\link{OptimInstanceBatch}).} +} +\if{html}{\out{
}} +} +\subsection{Returns}{ +\link[data.table:data.table]{data.table::data.table}. +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatch-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerBatch$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/as_terminator.Rd b/man/as_terminator.Rd new file mode 100644 index 000000000..6776b2b2b --- /dev/null +++ b/man/as_terminator.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/as_terminator.R +\name{as_terminator} +\alias{as_terminator} +\alias{as_terminator.Terminator} +\alias{as_terminators} +\alias{as_terminators.default} +\alias{as_terminators.list} +\title{Convert to a Terminator} +\usage{ +as_terminator(x, ...) + +\method{as_terminator}{Terminator}(x, clone = FALSE, ...) + +as_terminators(x, ...) + +\method{as_terminators}{default}(x, ...) + +\method{as_terminators}{list}(x, ...) +} +\arguments{ +\item{x}{(any)\cr +Object to convert.} + +\item{...}{(any)\cr +Additional arguments.} + +\item{clone}{(\code{logical(1)})\cr +If \code{TRUE}, ensures that the returned object is not the same as the input \code{x}.} +} +\description{ +Convert object to a \link{Terminator} or a list of \link{Terminator}. +} diff --git a/man/assign_result_default.Rd b/man/assign_result_default.Rd index 904075b10..e74b7014c 100644 --- a/man/assign_result_default.Rd +++ b/man/assign_result_default.Rd @@ -1,8 +1,8 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/helper.R +% Please edit documentation in R/Optimizer.R \name{assign_result_default} \alias{assign_result_default} -\title{Default assign_result function} +\title{Default Assign Result Function} \usage{ assign_result_default(inst) } diff --git a/man/bb_optimize.Rd b/man/bb_optimize.Rd index f040f91f2..dd9d532ed 100644 --- a/man/bb_optimize.Rd +++ b/man/bb_optimize.Rd @@ -68,7 +68,7 @@ If named, names are used to create the codomain.} \itemize{ \item \code{"par"} - Best found parameters \item \code{"value"} - Optimal outcome -\item \code{"instance"} - \link{OptimInstanceSingleCrit} | \link{OptimInstanceMultiCrit} +\item \code{"instance"} - \link{OptimInstanceBatchSingleCrit} | \link{OptimInstanceBatchMultiCrit} } } \description{ diff --git a/man/bbotk.backup.Rd b/man/bbotk.backup.Rd index 6be9f2788..fbbbc39c9 100644 --- a/man/bbotk.backup.Rd +++ b/man/bbotk.backup.Rd @@ -4,7 +4,7 @@ \alias{bbotk.backup} \title{Backup Archive Callback} \description{ -This \link{CallbackOptimization} writes the \link{Archive} after each batch to disk. +This \link{CallbackBatch} writes the \link{Archive} after each batch to disk. 
} \examples{ clbk("bbotk.backup", path = "backup.rds") diff --git a/man/bbotk_assertions.Rd b/man/bbotk_assertions.Rd index b4209c970..169982081 100644 --- a/man/bbotk_assertions.Rd +++ b/man/bbotk_assertions.Rd @@ -3,27 +3,59 @@ \name{bbotk_assertions} \alias{bbotk_assertions} \alias{assert_terminator} +\alias{assert_terminators} \alias{assert_terminable} \alias{assert_set} \alias{assert_optimizer} +\alias{assert_optimizer_async} +\alias{assert_optimizer_batch} +\alias{assert_instance} +\alias{assert_instance_batch} +\alias{assert_instance_async} \alias{assert_instance_properties} +\alias{assert_archive} +\alias{assert_archive_async} +\alias{assert_archive_batch} \title{Assertion for bbotk objects} \usage{ -assert_terminator(terminator, instance = NULL) +assert_terminator(terminator, instance = NULL, null_ok = FALSE) + +assert_terminators(terminators) assert_terminable(terminator, instance) assert_set(x, empty = TRUE, .var.name = vname(x)) -assert_optimizer(optimizer) +assert_optimizer(optimizer, null_ok = FALSE) + +assert_optimizer_async(optimizer, null_ok = FALSE) + +assert_optimizer_batch(optimizer, null_ok = FALSE) + +assert_instance(inst, null_ok = FALSE) + +assert_instance_batch(inst, null_ok = FALSE) + +assert_instance_async(inst, null_ok = FALSE) assert_instance_properties(optimizer, inst) + +assert_archive(archive, null_ok = FALSE) + +assert_archive_async(archive, null_ok = FALSE) + +assert_archive_batch(archive, null_ok = FALSE) } \arguments{ \item{terminator}{(\link{Terminator}).} \item{instance}{(\link{OptimInstance}).} +\item{null_ok}{(\code{logical(1)})\cr +Is \code{NULL} a valid value?} + +\item{terminators}{(list of \link{Terminator}).} + \item{x}{(any)} \item{empty}{(\code{logical(1)})} @@ -31,6 +63,10 @@ assert_instance_properties(optimizer, inst) \item{.var.name}{(\code{character(1)})} \item{optimizer}{(\link{Optimizer}).} + +\item{inst}{(\link{OptimInstanceAsync})} + +\item{archive}{(\link{ArchiveBatch}).} } \description{ Most assertion functions ensure the right class attribute, and optionally diff --git a/man/bbotk_worker_loop.Rd b/man/bbotk_worker_loop.Rd new file mode 100644 index 000000000..81db91ac8 --- /dev/null +++ b/man/bbotk_worker_loop.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/worker_loops.R +\name{bbotk_worker_loop} +\alias{bbotk_worker_loop} +\title{Worker loop for Rush} +\usage{ +bbotk_worker_loop(rush, optimizer, instance) +} +\arguments{ +\item{rush}{(\code{Rush})\cr +If a rush instance is supplied, the tuning runs without batches.} + +\item{optimizer}{\link{OptimizerAsync}.} + +\item{instance}{\link{OptimInstanceAsync}.} +} +\description{ +Loop run on the workers. +Pops a task from the queue and evaluates it with the objective function. +Pushes the results back to the data base. 
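The loop described above can be pictured with a small schematic; the queue methods used here (\code{pop_task()}, \code{push_results()}) are illustrative placeholders and not the actual rush API.

\preformatted{# schematic only -- placeholder queue methods, not the real implementation
worker_loop_sketch = function(rush, optimizer, instance) {
  while (!instance$terminator$is_terminated(instance$archive)) {
    task = rush$pop_task()                 # placeholder: fetch the next point
    ys = instance$objective$eval(task$xs)  # evaluate it with the objective function
    rush$push_results(task$key, ys)        # placeholder: write the outcome back
  }
}
}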
+} +\keyword{internal} diff --git a/man/callback_async.Rd b/man/callback_async.Rd new file mode 100644 index 000000000..676979cea --- /dev/null +++ b/man/callback_async.Rd @@ -0,0 +1,77 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/CallbackAsync.R +\name{callback_async} +\alias{callback_async} +\title{Create Asynchronous Optimization Callback} +\usage{ +callback_async( + id, + label = NA_character_, + man = NA_character_, + on_optimization_begin = NULL, + on_worker_begin = NULL, + on_worker_end = NULL, + on_result = NULL, + on_optimization_end = NULL +) +} +\arguments{ +\item{id}{(\code{character(1)})\cr +Identifier for the new instance.} + +\item{label}{(\code{character(1)})\cr +Label for the new instance.} + +\item{man}{(\code{character(1)})\cr +String in the format \verb{[pkg]::[topic]} pointing to a manual page for this object. +The referenced help package can be opened via method \verb{$help()}.} + +\item{on_optimization_begin}{(\verb{function()})\cr +Stage called at the beginning of the optimization in the main process. +Called in \code{Optimizer$optimize()}. +The functions must have two arguments named \code{callback} and \code{context}.} + +\item{on_worker_begin}{(\verb{function()})\cr +Stage called at the beginning of the optimization on the worker. +Called in the worker loop. +The functions must have two arguments named \code{callback} and \code{context}.} + +\item{on_worker_end}{(\verb{function()})\cr +Stage called at the end of the optimization on the worker. +Called in the worker loop. +The functions must have two arguments named \code{callback} and \code{context}.} + +\item{on_result}{(\verb{function()})\cr +Stage called after result are written. +Called in \code{OptimInstance$assign_result()}. +The functions must have two arguments named \code{callback} and \code{context}.} + +\item{on_optimization_end}{(\verb{function()})\cr +Stage called at the end of the optimization in the main process. +Called in \code{Optimizer$optimize()}. +The functions must have two arguments named \code{callback} and \code{context}.} +} +\description{ +Function to create a \link{CallbackAsync}. + +Optimization callbacks can be called from different stages of optimization process. +The stages are prefixed with \verb{on_*}. + +\if{html}{\out{
}}\preformatted{Start Optimization + - on_optimization_begin + Start Worker + - on_worker_begin + - on_worker_end + End Worker + - on_result + - on_optimization_end +End Optimization +}\if{html}{\out{
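As a quick illustration of these stages, a callback that only reports from the worker stages could be sketched like this; the identifier and messages are invented for the example.

\preformatted{# sketch: report when a worker starts and stops
worker_logger = callback_async("bbotk.worker_logger",
  label = "Worker Logger",
  on_worker_begin = function(callback, context) message("worker started"),
  on_worker_end = function(callback, context) message("worker finished")
)
}

Such a callback would be passed to the instance via the \code{callbacks} argument, e.g. of \code{oi_async()}.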
}} + +See also the section on parameters for more information on the stages. +An optimization callback works with \link{ContextAsync}. +} +\details{ +A callback can write data to its state (\verb{$state}), e.g. settings that affect the callback itself. +The \link{ContextAsync} allows modifying the instance, archive, optimizer and final result. +} diff --git a/man/callback_optimization.Rd b/man/callback_batch.Rd similarity index 85% rename from man/callback_optimization.Rd rename to man/callback_batch.Rd index 89c2352d7..18e382b5d 100644 --- a/man/callback_optimization.Rd +++ b/man/callback_batch.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CallbackOptimization.R -\name{callback_optimization} -\alias{callback_optimization} -\title{Create Optimization Callback} +% Please edit documentation in R/CallbackBatch.R +\name{callback_batch} +\alias{callback_batch} +\title{Create Batch Optimization Callback} \usage{ -callback_optimization( +callback_batch( id, label = NA_character_, man = NA_character_, @@ -56,7 +56,7 @@ The functions must have two arguments named \code{callback} and \code{context}.} List of additional fields.} } \description{ -Function to create a \link{CallbackOptimization}. +Function to create a \link{CallbackBatch}. Optimization callbacks can be called from different stages of the optimization process. The stages are prefixed with \verb{on_*}. @@ -73,15 +73,15 @@ End Optimization }\if{html}{\out{
}} See also the section on parameters for more information on the stages. -A optimization callback works with \link{ContextOptimization}. +An optimization callback works with \link{ContextBatch}. } \details{ A callback can write data to its state (\verb{$state}), e.g. settings that affect the callback itself. -The \link{ContextOptimization} allows to modify the instance, archive, optimizer and final result. +The \link{ContextBatch} allows modifying the instance, archive, optimizer and final result. } \examples{ # write archive to disk -callback_optimization("bbotk.backup", +callback_batch("bbotk.backup", on_optimization_end = function(callback, context) { saveRDS(context$instance$archive, "archive.rds") } diff --git a/man/evaluate_queue_default.Rd b/man/evaluate_queue_default.Rd new file mode 100644 index 000000000..59e95c070 --- /dev/null +++ b/man/evaluate_queue_default.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimizerAsync.R +\name{evaluate_queue_default} +\alias{evaluate_queue_default} +\title{Default Evaluation of the Queue} +\usage{ +evaluate_queue_default(instance) +} +\arguments{ +\item{instance}{\link{OptimInstanceAsync}.} +} +\description{ +Used internally in \verb{$.optimize()} of \link{OptimizerAsync} classes to evaluate a queue of points, e.g. in \link{OptimizerAsyncGridSearch}. +} +\keyword{internal} diff --git a/man/mlr_optimizers_async_design_points.Rd b/man/mlr_optimizers_async_design_points.Rd new file mode 100644 index 000000000..9dbb45bbd --- /dev/null +++ b/man/mlr_optimizers_async_design_points.Rd @@ -0,0 +1,96 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimizerAsyncDesignPoints.R +\name{mlr_optimizers_async_design_points} +\alias{mlr_optimizers_async_design_points} +\alias{OptimizerAsyncDesignPoints} +\title{Asynchronous Optimization via Design Points} +\description{ +\code{OptimizerAsyncDesignPoints} class that implements optimization w.r.t. fixed design points. +We simply search over a set of points fully specified by the user. +} +\section{Dictionary}{ + +This \link{Optimizer} can be instantiated via the \link[mlr3misc:Dictionary]{dictionary} +\link{mlr_optimizers} or with the associated sugar function \code{\link[=opt]{opt()}}: + +\if{html}{\out{
}}\preformatted{mlr_optimizers$get("async_design_points") +opt("async_design_points") +}\if{html}{\out{
}} +} + +\section{Parameters}{ + +\describe{ +\item{\code{design}}{\link[data.table:data.table]{data.table::data.table}\cr +Design points to try in search, one per row.} +} +} + +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerAsync]{bbotk::OptimizerAsync}} -> \code{OptimizerAsyncDesignPoints} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-OptimizerAsyncDesignPoints-new}{\code{OptimizerAsyncDesignPoints$new()}} +\item \href{#method-OptimizerAsyncDesignPoints-optimize}{\code{OptimizerAsyncDesignPoints$optimize()}} +\item \href{#method-OptimizerAsyncDesignPoints-clone}{\code{OptimizerAsyncDesignPoints$clone()}} +} +} +\if{html}{\out{ +
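A short sketch of how the \code{design} parameter might be set; the column names must match the search space and the values here are invented.

\preformatted{# sketch: supply a fixed design to the asynchronous design points optimizer
library(bbotk)
library(data.table)
optimizer = opt("async_design_points")
optimizer$param_set$values$design = data.table(x1 = c(0, 1, -1), x2 = c(0, 1, 1))
}

Calling \code{optimizer$optimize(instance)} on an asynchronous instance should then push exactly these points to the workers.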
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncDesignPoints-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncDesignPoints$new()}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncDesignPoints-optimize}{}}} +\subsection{Method \code{optimize()}}{ +Starts the asynchronous optimization. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncDesignPoints$optimize(inst)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{inst}}{(\link{OptimInstance}).} +} +\if{html}{\out{
}} +} +\subsection{Returns}{ +\link[data.table:data.table]{data.table::data.table}. +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncDesignPoints-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncDesignPoints$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/mlr_optimizers_async_grid_search.Rd b/man/mlr_optimizers_async_grid_search.Rd new file mode 100644 index 000000000..5cfb97d2a --- /dev/null +++ b/man/mlr_optimizers_async_grid_search.Rd @@ -0,0 +1,103 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimizerAsyncGridSearch.R +\name{mlr_optimizers_async_grid_search} +\alias{mlr_optimizers_async_grid_search} +\alias{OptimizerAsyncGridSearch} +\title{Asynchronous Optimization via Grid Search} +\source{ +Bergstra J, Bengio Y (2012). +\dQuote{Random Search for Hyper-Parameter Optimization.} +\emph{Journal of Machine Learning Research}, \bold{13}(10), 281--305. +\url{https://jmlr.csail.mit.edu/papers/v13/bergstra12a.html}. +} +\description{ +\code{OptimizerAsyncGridSearch} class that implements a grid search. +The grid is constructed as a Cartesian product over discretized values per parameter, see \code{\link[paradox:generate_design_grid]{paradox::generate_design_grid()}}. +The points of the grid are evaluated in a random order. +} +\section{Dictionary}{ + +This \link{Optimizer} can be instantiated via the \link[mlr3misc:Dictionary]{dictionary} +\link{mlr_optimizers} or with the associated sugar function \code{\link[=opt]{opt()}}: + +\if{html}{\out{
}}\preformatted{mlr_optimizers$get("async_grid_search") +opt("async_grid_search") +}\if{html}{\out{
}} +} + +\section{Parameters}{ + +\describe{ +\item{\code{batch_size}}{\code{integer(1)}\cr +Maximum number of points to try in a batch.} +} +} + +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerAsync]{bbotk::OptimizerAsync}} -> \code{OptimizerAsyncGridSearch} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-OptimizerAsyncGridSearch-new}{\code{OptimizerAsyncGridSearch$new()}} +\item \href{#method-OptimizerAsyncGridSearch-optimize}{\code{OptimizerAsyncGridSearch$optimize()}} +\item \href{#method-OptimizerAsyncGridSearch-clone}{\code{OptimizerAsyncGridSearch$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncGridSearch-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncGridSearch$new()}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncGridSearch-optimize}{}}} +\subsection{Method \code{optimize()}}{ +Starts the asynchronous optimization. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncGridSearch$optimize(inst)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{inst}}{(\link{OptimInstance}).} +} +\if{html}{\out{
}} +} +\subsection{Returns}{ +\link[data.table:data.table]{data.table::data.table}. +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncGridSearch-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncGridSearch$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/mlr_optimizers_async_random_search.Rd b/man/mlr_optimizers_async_random_search.Rd new file mode 100644 index 000000000..93fde9377 --- /dev/null +++ b/man/mlr_optimizers_async_random_search.Rd @@ -0,0 +1,73 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimizerAsyncRandomSearch.R +\name{mlr_optimizers_async_random_search} +\alias{mlr_optimizers_async_random_search} +\alias{OptimizerAsyncRandomSearch} +\title{Asynchronous Optimization via Random Search} +\source{ +Bergstra J, Bengio Y (2012). +\dQuote{Random Search for Hyper-Parameter Optimization.} +\emph{Journal of Machine Learning Research}, \bold{13}(10), 281--305. +\url{https://jmlr.csail.mit.edu/papers/v13/bergstra12a.html}. +} +\description{ +\code{OptimizerAsyncRandomSearch} class that implements a simple Random Search. +} +\section{Dictionary}{ + +This \link{Optimizer} can be instantiated via the \link[mlr3misc:Dictionary]{dictionary} +\link{mlr_optimizers} or with the associated sugar function \code{\link[=opt]{opt()}}: + +\if{html}{\out{
}}\preformatted{mlr_optimizers$get("async_random_search") +opt("async_random_search") +}\if{html}{\out{
}} +} + +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerAsync]{bbotk::OptimizerAsync}} -> \code{OptimizerAsyncRandomSearch} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-OptimizerAsyncRandomSearch-new}{\code{OptimizerAsyncRandomSearch$new()}} +\item \href{#method-OptimizerAsyncRandomSearch-clone}{\code{OptimizerAsyncRandomSearch$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncRandomSearch-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncRandomSearch$new()}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncRandomSearch-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncRandomSearch$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/mlr_optimizers_cmaes.Rd b/man/mlr_optimizers_cmaes.Rd index fd28638e7..0b15c8ae1 100644 --- a/man/mlr_optimizers_cmaes.Rd +++ b/man/mlr_optimizers_cmaes.Rd @@ -1,11 +1,11 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimizerCmaes.R +% Please edit documentation in R/OptimizerBatchCmaes.R \name{mlr_optimizers_cmaes} \alias{mlr_optimizers_cmaes} -\alias{OptimizerCmaes} +\alias{OptimizerBatchCmaes} \title{Optimization via Covariance Matrix Adaptation Evolution Strategy} \description{ -\code{OptimizerCmaes} class that implements CMA-ES. Calls \code{\link[adagio:cmaes]{adagio::pureCMAES()}} +\code{OptimizerBatchCmaes} class that implements CMA-ES. Calls \code{\link[adagio:cmaes]{adagio::pureCMAES()}} from package \CRANpkg{adagio}. The algorithm is typically applied to search space dimensions between three and fifty. Lower search space dimensions might crash. @@ -60,7 +60,7 @@ if (requireNamespace("adagio")) { domain = domain, codomain = codomain) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 10)) @@ -77,14 +77,14 @@ if (requireNamespace("adagio")) { as.data.table(instance$archive$data) } } -\section{Super class}{ -\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{OptimizerCmaes} +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}} -> \code{OptimizerBatchCmaes} } \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-OptimizerCmaes-new}{\code{OptimizerCmaes$new()}} -\item \href{#method-OptimizerCmaes-clone}{\code{OptimizerCmaes$clone()}} +\item \href{#method-OptimizerBatchCmaes-new}{\code{OptimizerBatchCmaes$new()}} +\item \href{#method-OptimizerBatchCmaes-clone}{\code{OptimizerBatchCmaes$clone()}} } } \if{html}{\out{ @@ -92,28 +92,28 @@ if (requireNamespace("adagio")) { }} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerCmaes-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchCmaes-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerCmaes$new()}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchCmaes$new()}\if{html}{\out{
}} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerCmaes-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchCmaes-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerCmaes$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchCmaes$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/mlr_optimizers_design_points.Rd b/man/mlr_optimizers_design_points.Rd index f8242c981..b8e836223 100644 --- a/man/mlr_optimizers_design_points.Rd +++ b/man/mlr_optimizers_design_points.Rd @@ -1,11 +1,11 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimizerDesignPoints.R +% Please edit documentation in R/OptimizerBatchDesignPoints.R \name{mlr_optimizers_design_points} \alias{mlr_optimizers_design_points} -\alias{OptimizerDesignPoints} +\alias{OptimizerBatchDesignPoints} \title{Optimization via Design Points} \description{ -\code{OptimizerDesignPoints} class that implements optimization w.r.t. fixed +\code{OptimizerBatchDesignPoints} class that implements optimization w.r.t. fixed design points. We simply search over a set of points fully specified by the user. The points in the design are evaluated in order as given. @@ -57,7 +57,7 @@ objective = ObjectiveRFun$new( domain = domain, codomain = codomain) -instance = OptimInstanceSingleCrit$new( +instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 10)) @@ -75,14 +75,14 @@ instance$result # Allows access of data.table of full path of all evaluations as.data.table(instance$archive) } -\section{Super class}{ -\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{OptimizerDesignPoints} +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}} -> \code{OptimizerBatchDesignPoints} } \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-OptimizerDesignPoints-new}{\code{OptimizerDesignPoints$new()}} -\item \href{#method-OptimizerDesignPoints-clone}{\code{OptimizerDesignPoints$clone()}} +\item \href{#method-OptimizerBatchDesignPoints-new}{\code{OptimizerBatchDesignPoints$new()}} +\item \href{#method-OptimizerBatchDesignPoints-clone}{\code{OptimizerBatchDesignPoints$clone()}} } } \if{html}{\out{ @@ -90,28 +90,28 @@ as.data.table(instance$archive) }} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerDesignPoints-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchDesignPoints-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerDesignPoints$new()}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchDesignPoints$new()}\if{html}{\out{
}} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerDesignPoints-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchDesignPoints-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerDesignPoints$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchDesignPoints$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/mlr_optimizers_focus_search.Rd b/man/mlr_optimizers_focus_search.Rd index c9a2ddcae..94fcb29a5 100644 --- a/man/mlr_optimizers_focus_search.Rd +++ b/man/mlr_optimizers_focus_search.Rd @@ -1,11 +1,11 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimizerFocusSearch.R +% Please edit documentation in R/OptimizerBatchFocusSearch.R \name{mlr_optimizers_focus_search} \alias{mlr_optimizers_focus_search} -\alias{OptimizerFocusSearch} +\alias{OptimizerBatchFocusSearch} \title{Optimization via Focus Search} \description{ -\code{OptimizerFocusSearch} class that implements a Focus Search. +\code{OptimizerBatchFocusSearch} class that implements a Focus Search. Focus Search starts with evaluating \code{n_points} drawn uniformly at random. For 1 to \code{maxit} batches, \code{n_points} are then drawn uniformly at random and @@ -60,7 +60,7 @@ objective = ObjectiveRFun$new( domain = domain, codomain = codomain) -instance = OptimInstanceSingleCrit$new( +instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 10)) @@ -77,14 +77,14 @@ instance$result # allows access of data.table of full path of all evaluations as.data.table(instance$archive$data) } -\section{Super class}{ -\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{OptimizerFocusSearch} +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}} -> \code{OptimizerBatchFocusSearch} } \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-OptimizerFocusSearch-new}{\code{OptimizerFocusSearch$new()}} -\item \href{#method-OptimizerFocusSearch-clone}{\code{OptimizerFocusSearch$clone()}} +\item \href{#method-OptimizerBatchFocusSearch-new}{\code{OptimizerBatchFocusSearch$new()}} +\item \href{#method-OptimizerBatchFocusSearch-clone}{\code{OptimizerBatchFocusSearch$clone()}} } } \if{html}{\out{ @@ -92,28 +92,28 @@ as.data.table(instance$archive$data) }} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerFocusSearch-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchFocusSearch-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerFocusSearch$new()}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchFocusSearch$new()}\if{html}{\out{
}} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerFocusSearch-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchFocusSearch-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerFocusSearch$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchFocusSearch$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/mlr_optimizers_gensa.Rd b/man/mlr_optimizers_gensa.Rd index d84000b3e..29d99892f 100644 --- a/man/mlr_optimizers_gensa.Rd +++ b/man/mlr_optimizers_gensa.Rd @@ -1,8 +1,8 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimizerGenSA.R +% Please edit documentation in R/OptimizerBatchGenSA.R \name{mlr_optimizers_gensa} \alias{mlr_optimizers_gensa} -\alias{OptimizerGenSA} +\alias{OptimizerBatchGenSA} \title{Optimization via Generalized Simulated Annealing} \source{ Tsallis C, Stariolo DA (1996). @@ -16,7 +16,7 @@ Xiang Y, Gubian S, Suomela B, Hoeng J (2013). \doi{10.32614/rj-2013-002}. } \description{ -\code{OptimizerGenSA} class that implements generalized simulated annealing. Calls +\code{OptimizerBatchGenSA} class that implements generalized simulated annealing. Calls \code{\link[GenSA:GenSA]{GenSA::GenSA()}} from package \CRANpkg{GenSA}. } \section{Dictionary}{ @@ -73,7 +73,7 @@ if (requireNamespace("GenSA")) { domain = domain, codomain = codomain) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 10)) @@ -90,14 +90,14 @@ if (requireNamespace("GenSA")) { as.data.table(instance$archive$data) } } -\section{Super class}{ -\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{OptimizerGenSA} +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}} -> \code{OptimizerBatchGenSA} } \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-OptimizerGenSA-new}{\code{OptimizerGenSA$new()}} -\item \href{#method-OptimizerGenSA-clone}{\code{OptimizerGenSA$clone()}} +\item \href{#method-OptimizerBatchGenSA-new}{\code{OptimizerBatchGenSA$new()}} +\item \href{#method-OptimizerBatchGenSA-clone}{\code{OptimizerBatchGenSA$clone()}} } } \if{html}{\out{ @@ -105,28 +105,28 @@ if (requireNamespace("GenSA")) { }} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerGenSA-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchGenSA-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerGenSA$new()}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchGenSA$new()}\if{html}{\out{
}} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerGenSA-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchGenSA-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerGenSA$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchGenSA$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/mlr_optimizers_grid_search.Rd b/man/mlr_optimizers_grid_search.Rd index 9ff04b468..46745d573 100644 --- a/man/mlr_optimizers_grid_search.Rd +++ b/man/mlr_optimizers_grid_search.Rd @@ -1,11 +1,11 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimizerGridSearch.R +% Please edit documentation in R/OptimizerBatchGridSearch.R \name{mlr_optimizers_grid_search} \alias{mlr_optimizers_grid_search} -\alias{OptimizerGridSearch} +\alias{OptimizerBatchGridSearch} \title{Optimization via Grid Search} \description{ -\code{OptimizerGridSearch} class that implements grid search. The grid is +\code{OptimizerBatchGridSearch} class that implements grid search. The grid is constructed as a Cartesian product over discretized values per parameter, see \code{\link[paradox:generate_design_grid]{paradox::generate_design_grid()}}. The points of the grid are evaluated in a random order. @@ -60,7 +60,7 @@ objective = ObjectiveRFun$new( domain = domain, codomain = codomain) -instance = OptimInstanceSingleCrit$new( +instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 10)) @@ -77,14 +77,14 @@ instance$result # allows access of data.table of full path of all evaluations as.data.table(instance$archive$data) } -\section{Super class}{ -\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{OptimizerGridSearch} +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}} -> \code{OptimizerBatchGridSearch} } \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-OptimizerGridSearch-new}{\code{OptimizerGridSearch$new()}} -\item \href{#method-OptimizerGridSearch-clone}{\code{OptimizerGridSearch$clone()}} +\item \href{#method-OptimizerBatchGridSearch-new}{\code{OptimizerBatchGridSearch$new()}} +\item \href{#method-OptimizerBatchGridSearch-clone}{\code{OptimizerBatchGridSearch$clone()}} } } \if{html}{\out{ @@ -92,28 +92,28 @@ as.data.table(instance$archive$data) }} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerGridSearch-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchGridSearch-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerGridSearch$new()}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchGridSearch$new()}\if{html}{\out{
}} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerGridSearch-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchGridSearch-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerGridSearch$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchGridSearch$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/mlr_optimizers_irace.Rd b/man/mlr_optimizers_irace.Rd index fe8e53b58..0e846f09a 100644 --- a/man/mlr_optimizers_irace.Rd +++ b/man/mlr_optimizers_irace.Rd @@ -1,8 +1,8 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimizerIrace.R +% Please edit documentation in R/OptimizerBatchIrace.R \name{mlr_optimizers_irace} \alias{mlr_optimizers_irace} -\alias{OptimizerIrace} +\alias{OptimizerBatchIrace} \title{Optimization via Iterated Racing} \source{ Lopez-Ibanez M, Dubois-Lacoste J, Caceres LP, Birattari M, Stuetzle T (2016). @@ -11,7 +11,7 @@ Lopez-Ibanez M, Dubois-Lacoste J, Caceres LP, Birattari M, Stuetzle T (2016). \doi{https://doi.org/10.1016/j.orp.2016.09.002}. } \description{ -\code{OptimizerIrace} class that implements iterated racing. Calls +\code{OptimizerBatchIrace} class that implements iterated racing. Calls \code{\link[irace:irace]{irace::irace()}} from package \CRANpkg{irace}. } \section{Parameters}{ @@ -28,7 +28,7 @@ configuration and instance. A default function is provided, see section For the meaning of all other parameters, see \code{\link[irace:defaultScenario]{irace::defaultScenario()}}. Note that we have removed all control parameters which refer to the termination of the algorithm. Use \link{TerminatorEvals} instead. Other terminators do not work -with \code{OptimizerIrace}. +with \code{OptimizerBatchIrace}. In contrast to \code{\link[irace:defaultScenario]{irace::defaultScenario()}}, we set \code{digits = 15}. This represents double parameters with a higher precision and avoids rounding errors. @@ -38,7 +38,7 @@ This represents double parameters with a higher precision and avoids rounding er The irace package uses a \code{targetRunner} script or R function to evaluate a configuration on a particular instance. Usually it is not necessary to -specify a \code{targetRunner} function when using \code{OptimizerIrace}. A default +specify a \code{targetRunner} function when using \code{OptimizerBatchIrace}. A default function is used that forwards several configurations and instances to the user defined objective function. As usually, the user defined function has a \code{xs}, \code{xss} or \code{xdt} parameter depending on the used \link{Objective} class. @@ -91,6 +91,7 @@ combined with a \link{Terminator}. Simply wrap the function in } \examples{ +\donttest{ library(data.table) search_space = domain = ps( @@ -115,7 +116,7 @@ objective = ObjectiveRFunDt$new( codomain = codomain, constants = ps(instances = p_uty())) -instance = OptimInstanceSingleCrit$new( +instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 1000)) @@ -135,14 +136,15 @@ instance$result # all evaluations as.data.table(instance$archive) } -\section{Super class}{ -\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{OptimizerIrace} +} +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}} -> \code{OptimizerBatchIrace} } \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-OptimizerIrace-new}{\code{OptimizerIrace$new()}} -\item \href{#method-OptimizerIrace-clone}{\code{OptimizerIrace$clone()}} +\item \href{#method-OptimizerBatchIrace-new}{\code{OptimizerBatchIrace$new()}} +\item \href{#method-OptimizerBatchIrace-clone}{\code{OptimizerBatchIrace$clone()}} } } \if{html}{\out{ @@ -150,28 +152,28 @@ as.data.table(instance$archive) }} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerIrace-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchIrace-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerIrace$new()}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchIrace$new()}\if{html}{\out{
}} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerIrace-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchIrace-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerIrace$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchIrace$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/mlr_optimizers_nloptr.Rd b/man/mlr_optimizers_nloptr.Rd index 5e223bdc3..e8cb20d5e 100644 --- a/man/mlr_optimizers_nloptr.Rd +++ b/man/mlr_optimizers_nloptr.Rd @@ -1,8 +1,8 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimizerNLoptr.R +% Please edit documentation in R/OptimizerBatchNLoptr.R \name{mlr_optimizers_nloptr} \alias{mlr_optimizers_nloptr} -\alias{OptimizerNLoptr} +\alias{OptimizerBatchNLoptr} \title{Optimization via Non-linear Optimization} \source{ Johnson, G S (2020). @@ -10,7 +10,7 @@ Johnson, G S (2020). \url{https://github.com/stevengj/nlopt}. } \description{ -\code{OptimizerNLoptr} class that implements non-linear optimization. Calls +\code{OptimizerBatchNLoptr} class that implements non-linear optimization. Calls \code{\link[nloptr:nloptr]{nloptr::nloptr()}} from package \CRANpkg{nloptr}. } \section{Parameters}{ @@ -66,7 +66,7 @@ if (requireNamespace("nloptr")) { # We use the internal termination criterion xtol_rel terminator = trm("none") - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = terminator) @@ -86,14 +86,14 @@ if (requireNamespace("nloptr")) { } } -\section{Super class}{ -\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{OptimizerNLoptr} +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}} -> \code{OptimizerBatchNLoptr} } \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-OptimizerNLoptr-new}{\code{OptimizerNLoptr$new()}} -\item \href{#method-OptimizerNLoptr-clone}{\code{OptimizerNLoptr$clone()}} +\item \href{#method-OptimizerBatchNLoptr-new}{\code{OptimizerBatchNLoptr$new()}} +\item \href{#method-OptimizerBatchNLoptr-clone}{\code{OptimizerBatchNLoptr$clone()}} } } \if{html}{\out{ @@ -101,28 +101,28 @@ if (requireNamespace("nloptr")) { }} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerNLoptr-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchNLoptr-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerNLoptr$new()}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchNLoptr$new()}\if{html}{\out{
}} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerNLoptr-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchNLoptr-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerNLoptr$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchNLoptr$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/mlr_optimizers_random_search.Rd b/man/mlr_optimizers_random_search.Rd index 82e3dfa49..a17db0978 100644 --- a/man/mlr_optimizers_random_search.Rd +++ b/man/mlr_optimizers_random_search.Rd @@ -1,8 +1,8 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimizerRandomSearch.R +% Please edit documentation in R/OptimizerBatchRandomSearch.R \name{mlr_optimizers_random_search} \alias{mlr_optimizers_random_search} -\alias{OptimizerRandomSearch} +\alias{OptimizerBatchRandomSearch} \title{Optimization via Random Search} \source{ Bergstra J, Bengio Y (2012). @@ -11,7 +11,7 @@ Bergstra J, Bengio Y (2012). \url{https://jmlr.csail.mit.edu/papers/v13/bergstra12a.html}. } \description{ -\code{OptimizerRandomSearch} class that implements a simple Random Search. +\code{OptimizerBatchRandomSearch} class that implements a simple Random Search. In order to support general termination criteria and parallelization, we evaluate points in a batch-fashion of size \code{batch_size}. Larger batches mean @@ -58,7 +58,7 @@ objective = ObjectiveRFun$new( domain = domain, codomain = codomain) -instance = OptimInstanceSingleCrit$new( +instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 10)) @@ -75,14 +75,14 @@ instance$result # allows access of data.table of full path of all evaluations as.data.table(instance$archive$data) } -\section{Super class}{ -\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{OptimizerRandomSearch} +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}} -> \code{OptimizerBatchRandomSearch} } \section{Methods}{ \subsection{Public methods}{ \itemize{ -\item \href{#method-OptimizerRandomSearch-new}{\code{OptimizerRandomSearch$new()}} -\item \href{#method-OptimizerRandomSearch-clone}{\code{OptimizerRandomSearch$clone()}} +\item \href{#method-OptimizerBatchRandomSearch-new}{\code{OptimizerBatchRandomSearch$new()}} +\item \href{#method-OptimizerBatchRandomSearch-clone}{\code{OptimizerBatchRandomSearch$clone()}} } } \if{html}{\out{ @@ -90,28 +90,28 @@ as.data.table(instance$archive$data) }} \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerRandomSearch-new}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchRandomSearch-new}{}}} \subsection{Method \code{new()}}{ Creates a new instance of this \link[R6:R6Class]{R6} class. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerRandomSearch$new()}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchRandomSearch$new()}\if{html}{\out{
}} } } \if{html}{\out{
}} -\if{html}{\out{}} -\if{latex}{\out{\hypertarget{method-OptimizerRandomSearch-clone}{}}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerBatchRandomSearch-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{OptimizerRandomSearch$clone(deep = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{OptimizerBatchRandomSearch$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ diff --git a/man/mlr_terminators_none.Rd b/man/mlr_terminators_none.Rd index a53237fef..9225c9bc6 100644 --- a/man/mlr_terminators_none.Rd +++ b/man/mlr_terminators_none.Rd @@ -5,7 +5,7 @@ \alias{TerminatorNone} \title{None Terminator} \description{ -Mainly useful for optimization algorithms where the stopping is inherently controlled by the algorithm itself (e.g. \link{OptimizerGridSearch}). +Mainly useful for optimization algorithms where the stopping is inherently controlled by the algorithm itself (e.g. \link{OptimizerBatchGridSearch}). } \section{Dictionary}{ diff --git a/man/oi.Rd b/man/oi.Rd new file mode 100644 index 000000000..87adc9835 --- /dev/null +++ b/man/oi.Rd @@ -0,0 +1,41 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/sugar.R +\name{oi} +\alias{oi} +\title{Syntactic Sugar for Optimization Instance Construction} +\usage{ +oi( + objective, + search_space = NULL, + terminator, + callbacks = NULL, + check_values = TRUE, + keep_evals = "all" +) +} +\arguments{ +\item{objective}{(\link{Objective})\cr +Objective function.} + +\item{search_space}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} +describes either a subset of the \code{domain} of the \link{Objective} or it describes +a set of parameters together with a \code{trafo} function that transforms values +from the search space to values of the domain. Depending on the context, this +value defaults to the domain of the objective.} + +\item{terminator}{\link{Terminator}\cr +Termination criterion.} + +\item{callbacks}{(list of \link[mlr3misc:Callback]{mlr3misc::Callback})\cr +List of callbacks.} + +\item{check_values}{(\code{logical(1)})\cr +Should points before the evaluation and the results be checked for validity?} + +\item{keep_evals}{(\code{character(1)})\cr +Keep \code{all} or only \code{best} evaluations in archive?} +} +\description{ +Function to construct a \link{OptimInstanceBatchSingleCrit} and \link{OptimInstanceBatchMultiCrit}. +} diff --git a/man/oi_async.Rd b/man/oi_async.Rd new file mode 100644 index 000000000..aa0486d3b --- /dev/null +++ b/man/oi_async.Rd @@ -0,0 +1,41 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/sugar.R +\name{oi_async} +\alias{oi_async} +\title{Syntactic Sugar for Asynchronous Optimization Instance Construction} +\usage{ +oi_async( + objective, + search_space = NULL, + terminator, + check_values = FALSE, + callbacks = NULL, + rush = NULL +) +} +\arguments{ +\item{objective}{(\link{Objective})\cr +Objective function.} + +\item{search_space}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr +Specifies the search space for the \link{Optimizer}. The \link[paradox:ParamSet]{paradox::ParamSet} +describes either a subset of the \code{domain} of the \link{Objective} or it describes +a set of parameters together with a \code{trafo} function that transforms values +from the search space to values of the domain. 
Depending on the context, this +value defaults to the domain of the objective.} + +\item{terminator}{\link{Terminator}\cr +Termination criterion.} + +\item{check_values}{(\code{logical(1)})\cr +Should points before the evaluation and the results be checked for validity?} + +\item{callbacks}{(list of \link[mlr3misc:Callback]{mlr3misc::Callback})\cr +List of callbacks.} + +\item{rush}{(\code{Rush})\cr +If a rush instance is supplied, the optimization runs without batches.} +} +\description{ +Function to construct an \link{OptimInstanceAsyncSingleCrit} and \link{OptimInstanceAsyncMultiCrit}. +} diff --git a/man/optimize_async_default.Rd b/man/optimize_async_default.Rd new file mode 100644 index 000000000..90f5cd1c5 --- /dev/null +++ b/man/optimize_async_default.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimizerAsync.R +\name{optimize_async_default} +\alias{optimize_async_default} +\title{Default Asynchronous Optimization} +\usage{ +optimize_async_default(instance, optimizer, design = NULL) +} +\arguments{ +\item{instance}{\link{OptimInstanceAsync}.} + +\item{optimizer}{\link{OptimizerAsync}.} +} +\description{ +Used internally in \link{OptimizerAsync}. +} +\keyword{internal} diff --git a/man/optimize_batch_default.Rd b/man/optimize_batch_default.Rd new file mode 100644 index 000000000..7612744c3 --- /dev/null +++ b/man/optimize_batch_default.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimizerBatch.R +\name{optimize_batch_default} +\alias{optimize_batch_default} +\title{Default Batch Optimization Function} +\usage{ +optimize_batch_default(instance, optimizer) +} +\arguments{ +\item{instance}{\link{OptimInstance}} + +\item{optimizer}{\link{OptimizerBatch}} +} +\value{ +\link[data.table:data.table]{data.table::data.table} +} +\description{ +Used internally in the \link{OptimizerBatch}. +} +\keyword{internal} diff --git a/man/optimize_default.Rd b/man/optimize_default.Rd deleted file mode 100644 index 8bc285228..000000000 --- a/man/optimize_default.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/helper.R -\name{optimize_default} -\alias{optimize_default} -\title{Default optimization function} -\usage{ -optimize_default(inst, self, private) -} -\arguments{ -\item{inst}{\link{OptimInstance}} - -\item{self}{\link{Optimizer}} - -\item{private}{(\code{environment()})} -} -\value{ -\link[data.table:data.table]{data.table::data.table} -} -\description{ -Used internally in the \link{Optimizer}. -Brings together the private \code{.optimize()} method and the private \code{.assign_result()} method. 
-} -\keyword{internal} diff --git a/man/shrink_ps.Rd b/man/shrink_ps.Rd index 63cc03974..2cbc96f65 100644 --- a/man/shrink_ps.Rd +++ b/man/shrink_ps.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/OptimizerFocusSearch.R +% Please edit documentation in R/OptimizerBatchFocusSearch.R \name{shrink_ps} \alias{shrink_ps} \title{Shrink a ParamSet towards a point.} diff --git a/man/terminated_error.Rd b/man/terminated_error.Rd new file mode 100644 index 000000000..e0734db55 --- /dev/null +++ b/man/terminated_error.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/helper.R +\name{terminated_error} +\alias{terminated_error} +\title{Termination Error} +\usage{ +terminated_error(optim_instance) +} +\arguments{ +\item{optim_instance}{\link{OptimInstance}\cr +OptimInstance that terminated.} +} +\description{ +Error class for termination. +} diff --git a/man/trafo_xs.Rd b/man/trafo_xs.Rd new file mode 100644 index 000000000..289d84106 --- /dev/null +++ b/man/trafo_xs.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/helper.R +\name{trafo_xs} +\alias{trafo_xs} +\title{Calculate the transformed x-values} +\usage{ +trafo_xs(xs, search_space) +} +\arguments{ +\item{xs}{(\code{list()}) \cr +List of x-values.} + +\item{search_space}{\link[paradox:ParamSet]{paradox::ParamSet}\cr +Search space.} +} +\description{ +Transforms a given \code{list()} to a list with transformed x values. +} diff --git a/pkgdown/_pkgdown.yml b/pkgdown/_pkgdown.yml index 45290de4d..8ada98a44 100644 --- a/pkgdown/_pkgdown.yml +++ b/pkgdown/_pkgdown.yml @@ -45,9 +45,11 @@ reference: - mlr_terminators - trm - trms + - as_terminator - title: Optimization Instance contents: - starts_with("OptimInstance") + - starts_with("oi") - title: Optimizer contents: - starts_with("Optimizer") @@ -56,14 +58,13 @@ reference: - opts - title: Archive contents: - - Archive - - ArchiveBest + - starts_with("Archive") - Codomain - title: Callbacks contents: - - CallbackOptimization - - callback_optimization - - ContextOptimization + - starts_with("Callback") + - starts_with("callback_") + - starts_with("Context") - bbotk.backup - title: Misc contents: @@ -72,4 +73,7 @@ reference: - branin - bb_optimize - shrink_ps + - bbotk_worker_loop + - trafo_xs + - terminated_error diff --git a/tests/testthat/helper.R b/tests/testthat/helper.R index cb4756d1f..ae65faa1a 100644 --- a/tests/testthat/helper.R +++ b/tests/testthat/helper.R @@ -1,3 +1,6 @@ +library(rush) +library(checkmate) + # Simple 1D Function PS_1D_domain = ps( x = p_dbl(lower = -1, upper = 1), @@ -40,7 +43,6 @@ PS_2D_TRF = ps( } ) - # Simple 2D Function with deps FUN_2D_DEPS = function(xs) { y = sum(as.numeric(xs)^2, na.rm = TRUE) # for PS with dependencies we ignore the not present param @@ -63,19 +65,17 @@ OBJ_2D_2D = ObjectiveRFun$new(fun = FUN_2D_2D, domain = PS_2D, codomain = FUN_2D_2D_CODOMAIN, properties = "multi-crit") # General Helper -MAKE_INST = function(objective = OBJ_2D, search_space = PS_2D, - terminator = 5L) { +MAKE_INST = function(objective = OBJ_2D, search_space = PS_2D, terminator = 5L) { if (is.integer(terminator)) { tt = TerminatorEvals$new() tt$param_set$values$n_evals = terminator terminator = tt } if (objective$codomain$length == 1) { - OptimInstanceSingleCrit$new(objective = objective, search_space = search_space, terminator = terminator) + OptimInstanceBatchSingleCrit$new(objective = objective, search_space = search_space, 
terminator = terminator) } else { - OptimInstanceMultiCrit$new(objective = objective, search_space = search_space, terminator = terminator) + OptimInstanceBatchMultiCrit$new(objective = objective, search_space = search_space, terminator = terminator) } - } MAKE_INST_1D = function(terminator) { @@ -93,7 +93,7 @@ MAKE_INST_2D_2D = function(terminator) { test_optimizer_1d = function(key, ..., term_evals = 2L, real_evals = term_evals) { terminator = trm("evals", n_evals = term_evals) - instance = OptimInstanceSingleCrit$new(objective = OBJ_1D, search_space = PS_1D, terminator = terminator) + instance = OptimInstanceBatchSingleCrit$new(objective = OBJ_1D, search_space = PS_1D, terminator = terminator) res = test_optimizer(instance = instance, key = key, ..., real_evals = real_evals) x_opt = res$instance$result_x_domain @@ -108,7 +108,7 @@ test_optimizer_1d = function(key, ..., term_evals = 2L, real_evals = term_evals) test_optimizer_2d = function(key, ..., term_evals = 2L, real_evals = term_evals) { terminator = trm("evals", n_evals = term_evals) - instance = OptimInstanceMultiCrit$new(objective = OBJ_2D_2D, search_space = PS_2D, terminator = terminator) + instance = OptimInstanceBatchMultiCrit$new(objective = OBJ_2D_2D, search_space = PS_2D, terminator = terminator) res = test_optimizer(instance = instance, key = key, ..., real_evals = real_evals) x_opt = res$instance$result_x_domain @@ -123,7 +123,7 @@ test_optimizer_2d = function(key, ..., term_evals = 2L, real_evals = term_evals) test_optimizer_dependencies = function(key, ..., term_evals = 2L, real_evals = term_evals) { terminator = trm("evals", n_evals = term_evals) - instance = OptimInstanceSingleCrit$new(objective = OBJ_2D_DEPS, search_space = PS_2D_DEPS, terminator = terminator) + instance = OptimInstanceBatchSingleCrit$new(objective = OBJ_2D_DEPS, search_space = PS_2D_DEPS, terminator = terminator) res = test_optimizer(instance = instance, key = key, ..., real_evals = real_evals) x_opt = res$instance$result_x_domain y_opt = res$instance$result_y @@ -149,9 +149,18 @@ test_optimizer = function(instance, key, ..., real_evals) { list(optimizer = optimizer, instance = instance) } +random_search = function(inst, batch_size = 10) { + assert_r6(inst, "OptimInstance") + batch_size = assert_int(batch_size, coerce = TRUE) + optim = OptimizerBatchRandomSearch$new() + optim$param_set$values$batch_size = batch_size + optim$optimize(inst) + return(inst$archive) +} + MAKE_OPT = function(param_set = ps(), param_classes = c("ParamDbl", "ParamInt"), properties = "single-crit", packages = character(0)) { - Optimizer$new(id = "optimizer", + OptimizerBatch$new(id = "optimizer", param_set = param_set, param_classes = param_classes, properties = properties, @@ -219,3 +228,16 @@ expect_dictionary = function(d, contains = NA_character_, min_items = 0L) { } checkmate::expect_data_table(data.table::as.data.table(d), key = "key", nrows = length(keys)) } + +expect_rush_reset = function(rush, type = "kill") { + processes = rush$processes + rush$reset(type = type) + expect_list(rush$connector$command(c("KEYS", "*")), len = 0) + walk(processes, function(p) p$kill()) +} + +flush_redis = function() { + config = redux::redis_config() + r = redux::hiredis(config) + r$FLUSHDB() +} diff --git a/tests/testthat/helper_rs.R b/tests/testthat/helper_rs.R deleted file mode 100644 index 2438d1670..000000000 --- a/tests/testthat/helper_rs.R +++ /dev/null @@ -1,16 +0,0 @@ -random_search = function(inst, batch_size = 10) { - assert_r6(inst, "OptimInstance") - batch_size = 
assert_int(batch_size, coerce = TRUE) - optim = OptimizerRandomSearch$new() - optim$param_set$values$batch_size = batch_size - optim$optimize(inst) - return(inst$archive) -} - -# ps1 = ParamDbl$new("x", lower = -1, upper = 1)$rep(2) -# fn = function(x) sum(as.numeric(x)^2) -# obj = Objective$new(fun = fn, domain = ps1, minimize = TRUE, encapsulate = "none") -# term = TerminatorEvals$new() -# term$param_set$values$n_evals = 2 -# a = random_search(obj, term) -# print(a) diff --git a/tests/testthat/setup.R b/tests/testthat/setup.R index 9beede4d7..d7adb9789 100644 --- a/tests/testthat/setup.R +++ b/tests/testthat/setup.R @@ -7,5 +7,11 @@ old_opts = options( # https://github.com/HenrikBengtsson/Wishlist-for-R/issues/88 old_opts = lapply(old_opts, function(x) if (is.null(x)) FALSE else x) -old_threshold = lg$threshold -lg$set_threshold("warn") +lg_bbotk = lgr::get_logger("bbotk") +lg_rush = lgr::get_logger("rush") + +old_threshold_bbotk = lg_bbotk$threshold +old_threshold_rush = lg_rush$threshold + +lg_bbotk$set_threshold(0) +lg_rush$set_threshold(0) diff --git a/tests/testthat/teardown.R b/tests/testthat/teardown.R index 7e2bf1fc7..5bbca3170 100644 --- a/tests/testthat/teardown.R +++ b/tests/testthat/teardown.R @@ -1,2 +1,3 @@ options(old_opts) -lg$set_threshold(old_threshold) +lg_bbotk$set_threshold(old_threshold_bbotk) +lg_rush$set_threshold(old_threshold_rush) diff --git a/tests/testthat/test_ArchiveAsync.R b/tests/testthat/test_ArchiveAsync.R new file mode 100644 index 000000000..0fdc2d321 --- /dev/null +++ b/tests/testthat/test_ArchiveAsync.R @@ -0,0 +1,83 @@ +test_that("ArchiveAsync works with one point", { + skip_if(TRUE) + skip_if_not_installed("rush") + flush_redis() + + rush = RushWorker$new(network_id = "remote_network", host = "local") + + archive = ArchiveAsync$new( + search_space = PS_2D, + codomain = FUN_2D_CODOMAIN, + rush = rush + ) + + xss = list(list(x1 = 1, x2 = 2)) + keys = archive$push_points(xss) + expect_string(keys) + + expect_data_table(archive$queued_data, nrows = 1) + expect_data_table(archive$running_data, nrows = 0) + expect_data_table(archive$finished_data, nrows = 0) + expect_data_table(archive$failed_data, nrows = 0) + expect_equal(archive$data_with_state()$state, "queued") + + expect_list(archive$pop_point(), len = 2) + + expect_data_table(archive$queued_data, nrows = 0) + expect_data_table(archive$running_data, nrows = 1) + expect_data_table(archive$finished_data, nrows = 0) + expect_data_table(archive$failed_data, nrows = 0) + expect_equal(archive$data_with_state()$state, "running") + + archive$push_result(keys, ys = list(y1 = 1, y2 = 2), x_domain = list(x1 = 1, x2 = 2)) + + expect_data_table(archive$queued_data, nrows = 0) + expect_data_table(archive$running_data, nrows = 0) + expect_data_table(archive$finished_data, nrows = 1) + expect_data_table(archive$failed_data, nrows = 0) + expect_equal(archive$data_with_state()$state, "finished") + + xs = list(x1 = 2, x2 = 2) + keys = archive$push_running_point(xs) + + expect_data_table(archive$queued_data, nrows = 0) + expect_data_table(archive$running_data, nrows = 1) + expect_data_table(archive$finished_data, nrows = 1) + expect_data_table(archive$failed_data, nrows = 0) + expect_equal(archive$data_with_state()$state, c("running", "finished")) + + archive$push_failed_point(keys, message = "error") + + expect_data_table(archive$queued_data, nrows = 0) + expect_data_table(archive$running_data, nrows = 0) + expect_data_table(archive$finished_data, nrows = 1) + expect_data_table(archive$failed_data, nrows = 
1) + expect_equal(archive$data_with_state()$state, c("finished", "failed")) + + expect_rush_reset(rush, type = "terminate") +}) + +test_that("as.data.table.ArchiveAsync works", { + skip_if(TRUE) + skip_if_not_installed("rush") + flush_redis() + + rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 5L), + ) + + optimizer = opt("async_random_search") + optimizer$optimize(instance) + + data = as.data.table(instance$archive) + expect_data_table(data, min.rows = 5) + expect_names(colnames(data), identical.to = c("state","x1","x2","y","timestamp_xs","pid","worker_id","timestamp_ys","keys","x_domain_x1","x_domain_x2")) + + data = as.data.table(instance$archive, unnest = NULL) + expect_list(data$x_domain) + + expect_rush_reset(instance$rush) +}) diff --git a/tests/testthat/test_Archive.R b/tests/testthat/test_ArchiveBatch.R similarity index 88% rename from tests/testthat/test_Archive.R rename to tests/testthat/test_ArchiveBatch.R index 894020c46..cf18c5187 100644 --- a/tests/testthat/test_Archive.R +++ b/tests/testthat/test_ArchiveBatch.R @@ -1,5 +1,5 @@ test_that("Archive", { - a = Archive$new(PS_2D, FUN_2D_CODOMAIN) + a = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN) expect_output(print(a), "Archive") expect_equal(a$n_evals, 0) expect_equal(a$cols_x, c("x1", "x2")) @@ -26,7 +26,7 @@ test_that("Archive", { }) test_that("Archive best works", { - a = Archive$new(PS_2D, FUN_2D_CODOMAIN) + a = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN) xdt = data.table(x1 = c(0, 0.5), x2 = c(1, 1)) xss_trafoed = list(list(x1 = c(0, 0.5), x2 = c(1, 1))) ydt = data.table(y = c(1, 0.25)) @@ -39,7 +39,7 @@ test_that("Archive best works", { a$add_evals(xdt, xss_trafoed, ydt) expect_equal(a$best(batch = 2)$batch_nr, 2L) - a = Archive$new(PS_2D, FUN_2D_2D_CODOMAIN) + a = ArchiveBatch$new(PS_2D, FUN_2D_2D_CODOMAIN) xdt = data.table(x1 = c(-1, -1, -1), x2 = c(1, 0, -1)) xss_trafoed = list(list(x1 = -1, x2 = 1), list(x1 = -1, x2 = 0), list(x1 = -1, x2 = 1)) ydt = data.table(y1 = c(1, 1, 1), y2 = c(-1, 0, -1)) @@ -48,7 +48,7 @@ test_that("Archive best works", { }) test_that("Archive on 1D problem works", { - a = Archive$new(PS_1D, FUN_1D_CODOMAIN) + a = ArchiveBatch$new(PS_1D, FUN_1D_CODOMAIN, check_values = TRUE) xdt = data.table(x = 1) xss_trafoed = list(list(x = 1)) ydt = data.table(y = 1) @@ -62,7 +62,7 @@ test_that("Archive on 1D problem works", { }) test_that("Unnest columns", { - a = Archive$new(PS_2D, FUN_2D_CODOMAIN) + a = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN, check_values = TRUE) xdt = data.table(x1 = 0, x2 = 1) xss_trafoed = list(list(x1 = 1, x2 = 2)) ydt = data.table(y = 1) @@ -78,7 +78,7 @@ test_that("Unnest columns", { }) test_that("NAs in ydt throw an error", { - a = Archive$new(PS_1D, FUN_1D_CODOMAIN) + a = ArchiveBatch$new(PS_1D, FUN_1D_CODOMAIN) xdt = data.table(x = 1) xss_trafoed = list(list(x = 1)) ydt = data.table(y = NA) @@ -88,20 +88,20 @@ test_that("NAs in ydt throw an error", { test_that("start_time is set by Optimizer", { inst = MAKE_INST() expect_null(inst$archive$start_time) - optimizer = OptimizerRandomSearch$new() + optimizer = OptimizerBatchRandomSearch$new() time = Sys.time() optimizer$optimize(inst) expect_equal(inst$archive$start_time, time, tolerance = 0.5) }) test_that("check_values flag works", { - a = Archive$new(PS_2D, FUN_2D_CODOMAIN, check_values = FALSE) + a = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN, check_values = FALSE) xdt = data.table(x1 = c(0, 2), x2 = c(1, 1)) xss_trafoed = list(list(x1 = c(0, 0.5), x2 = 
c(1, 1))) ydt = data.table(y = c(1, 0.25)) a$add_evals(xdt, xss_trafoed, ydt) - a = Archive$new(PS_2D, FUN_2D_CODOMAIN, check_values = TRUE) + a = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN, check_values = TRUE) xdt = data.table(x1 = c(0, 2), x2 = c(1, 1)) xss_trafoed = list(list(x1 = c(0, 0.5), x2 = c(1, 1))) ydt = data.table(y = c(1, 0.25)) @@ -109,7 +109,7 @@ test_that("check_values flag works", { }) test_that("deep clone works", { - a1 = Archive$new(PS_2D, FUN_2D_CODOMAIN) + a1 = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN) xdt = data.table(x1 = 0, x2 = 1) xss_trafoed = list(list(x1 = 0, x2 = 1)) ydt = data.table(y = 1) @@ -130,7 +130,7 @@ test_that("best method works with maximization", { codomain$params$y$tags = "maximize" }) - archive = Archive$new(PS_2D, FUN_2D_CODOMAIN) + archive = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN) xdt = data.table(x1 = runif(5), x2 = runif(5)) xss_trafoed = list(list(x1 = runif(5), x2 = runif(5))) ydt = data.table(y = c(1, 0.25, 2, 0.5, 0.3)) @@ -149,7 +149,7 @@ test_that("best method works with minimization", { }) - archive = Archive$new(PS_2D, FUN_2D_CODOMAIN) + archive = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN) xdt = data.table(x1 = runif(5), x2 = runif(5)) xss_trafoed = list(list(x1 = runif(5), x2 = runif(5))) ydt = data.table(y = c(1, 0.25, 2, 0.5, 0.3)) @@ -168,7 +168,7 @@ test_that("best method returns top n results with maximization", { }) - archive = Archive$new(PS_2D, FUN_2D_CODOMAIN) + archive = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN) xdt = data.table(x1 = runif(5), x2 = runif(5)) xss_trafoed = list(list(x1 = runif(5), x2 = runif(5))) ydt = data.table(y = c(1, 0.25, 2, 0.5, 0.3)) @@ -186,7 +186,7 @@ test_that("best method returns top n results with maximization and ties", { codomain$params$y$tags = "maximize" }) - archive = Archive$new(PS_2D, FUN_2D_CODOMAIN) + archive = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN) xdt = data.table(x1 = runif(5), x2 = runif(5)) xss_trafoed = list(list(x1 = runif(5), x2 = runif(5))) ydt = data.table(y = c(1, 1, 2, 0.5, 0.5)) @@ -204,7 +204,7 @@ test_that("best method returns top n results with minimization", { codomain$params$y$tags = "minimize" }) - archive = Archive$new(PS_2D, FUN_2D_CODOMAIN) + archive = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN) xdt = data.table(x1 = runif(5), x2 = runif(5)) xss_trafoed = list(list(x1 = runif(5), x2 = runif(5))) ydt = data.table(y = c(1, 0.25, 2, 0.5, 0.3)) @@ -222,7 +222,7 @@ test_that("best method returns top n results with minimization and ties", { codomain$params$y$tags = "minimize" }) - archive = Archive$new(PS_2D, FUN_2D_CODOMAIN) + archive = ArchiveBatch$new(PS_2D, FUN_2D_CODOMAIN) xdt = data.table(x1 = runif(5), x2 = runif(5)) xss_trafoed = list(list(x1 = runif(5), x2 = runif(5))) ydt = data.table(y = c(1, 0.25, 0.5, 0.3, 0.3)) diff --git a/tests/testthat/test_ArchiveBest.R b/tests/testthat/test_ArchiveBest.R deleted file mode 100644 index 3843e8b3b..000000000 --- a/tests/testthat/test_ArchiveBest.R +++ /dev/null @@ -1,69 +0,0 @@ -test_that("ArchiveBest", { - a = ArchiveBest$new(PS_2D, FUN_2D_CODOMAIN) - expect_output(print(a), "ArchiveBest") - expect_equal(a$n_evals, 0) - expect_equal(a$cols_x, c("x1", "x2")) - expect_equal(a$cols_y, "y") - xdt = data.table(x1 = 0, x2 = 1) - xss_trafoed = list(list(x1 = 0, x2 = 1)) - ydt = data.table(y = 1) - a$add_evals(xdt, xss_trafoed, ydt) - expect_equal(a$n_evals, 1) - expect_data_table(a$data, nrows = 0) - a$clear() - expect_data_table(a$data, nrows = 0) -}) - -test_that("Archive best works", { - a = ArchiveBest$new(PS_2D, FUN_2D_CODOMAIN) 
- expect_error(a$best(), "No results stored in archive") - xdt = data.table(x1 = c(0, 0.5), x2 = c(1, 1)) - xss_trafoed = list(list(x1 = c(0, 0.5), x2 = c(1, 1))) - ydt = data.table(y = c(1, 0.25)) - a$add_evals(xdt, xss_trafoed, ydt) - expect_equal(a$best(), data.table(x1 = 0.5, x2 = 1, y = 0.25)) - - xdt = data.table(x1 = 1, x2 = 1) - xss_trafoed = list(list(x1 = 1, x2 = 1)) - ydt = data.table(y = 0) - a$add_evals(xdt, xss_trafoed, ydt) - expect_equal(a$best(), data.table(x1 = 1, x2 = 1, y = 0)) - - codomain = ps(y = p_dbl(tags = "maximize")) - - a = ArchiveBest$new(PS_2D, codomain) - expect_error(a$best(), "No results stored in archive") - xdt = data.table(x1 = c(0, 0.5), x2 = c(1, 1)) - xss_trafoed = list(list(x1 = c(0, 0.5), x2 = c(1, 1))) - ydt = data.table(y = c(1, 0.25)) - a$add_evals(xdt, xss_trafoed, ydt) - expect_equal(a$best(), data.table(x1 = 0, x2 = 1, y = 1)) - - xdt = data.table(x1 = 1, x2 = 1) - xss_trafoed = list(list(x1 = 1, x2 = 1)) - ydt = data.table(y = 2) - a$add_evals(xdt, xss_trafoed, ydt) - expect_equal(a$best(), data.table(x1 = 1, x2 = 1, y = 2)) -}) - -test_that("ArchiveBest multi-crit works", { - a = ArchiveBest$new(PS_2D, FUN_2D_2D_CODOMAIN) - expect_output(print(a), "ArchiveBest") - expect_equal(a$n_evals, 0) - expect_equal(a$cols_x, c("x1", "x2")) - expect_equal(a$cols_y, c("y1", "y2")) - xdt = data.table(x1 = c(0, 1), x2 = c(1, 0)) - xss_trafoed = list(list(x1 = c(0, 1), x2 = c(1, 0))) - ydt = data.table(y1 = c(0, 1), y2 = c(1, 0)) - a$add_evals(xdt, xss_trafoed, ydt) - expect_equal(a$n_evals, 2) - expect_data_table(a$data, nrows = 0) - - expect_equal(a$best(), data.table(x1 = 0, x2 = 1, y1 = 0, y2 = 1)) - - xdt = data.table(x1 = 10, x2 = 10) - xss_trafoed = list(list(x1 = 10, x2 = 10)) - ydt = data.table(y1 = -20, y2 = 20) - a$add_evals(xdt, xss_trafoed, ydt) - expect_equal(a$best(), data.table(x1 = 10, x2 = 10, y1 = -20, y2 = 20)) -}) diff --git a/tests/testthat/test_Callback.R b/tests/testthat/test_Callback.R index bb63f3f26..7b64c906c 100644 --- a/tests/testthat/test_Callback.R +++ b/tests/testthat/test_Callback.R @@ -1,11 +1,11 @@ test_that("on_optimization_begin works", { - callback = callback_optimization(id = "test", + callback = callback_batch(id = "test", on_optimization_begin = function(callback, context) { context$instance$terminator$param_set$values$n_evals = 20 } ) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = OBJ_1D, search_space = PS_1D, terminator = trm("evals", n_evals = 10), @@ -14,18 +14,18 @@ test_that("on_optimization_begin works", { optimizer = opt("random_search") optimizer$optimize(instance) - expect_class(get_private(instance)$.context, "ContextOptimization") + expect_class(instance$objective$context, "ContextBatch") expect_equal(instance$terminator$param_set$values$n_evals, 20) }) test_that("on_optimization_end works", { - callback = callback_optimization(id = "test", + callback = callback_batch(id = "test", on_optimization_end = function(callback, context) { context$instance$terminator$param_set$values$n_evals = 20 } ) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = OBJ_1D, search_space = PS_1D, terminator = trm("evals", n_evals = 10), @@ -34,19 +34,18 @@ test_that("on_optimization_end works", { optimizer = opt("random_search") optimizer$optimize(instance) - expect_class(get_private(instance)$.context, "ContextOptimization") - expect_equal(instance$terminator$param_set$values$n_evals, 20) + 
expect_class(instance$objective$context, "ContextBatch") + expect_equal(instance$terminator$param_set$values$n_evals, 10) }) - -test_that("on_result in OptimInstanceSingleCrit works", { - callback = callback_optimization(id = "test", +test_that("on_result in OptimInstanceBatchSingleCrit works", { + callback = callback_batch(id = "test", on_result = function(callback, context) { context$result$y = 2 } ) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = OBJ_1D, search_space = PS_1D, terminator = trm("evals", n_evals = 10), @@ -55,19 +54,19 @@ test_that("on_result in OptimInstanceSingleCrit works", { optimizer = opt("random_search") optimizer$optimize(instance) - expect_class(get_private(instance)$.context, "ContextOptimization") + expect_class(instance$objective$context, "ContextBatch") expect_equal(instance$result$y, 2) }) -test_that("on_result in OptimInstanceMultiCrit works", { - callback = callback_optimization(id = "test", +test_that("on_result in OptimInstanceBatchMultiCrit works", { + callback = callback_batch(id = "test", on_result = function(callback, context) { context$result$y1 = 2 context$result$y2 = 2 } ) - instance = OptimInstanceMultiCrit$new( + instance = OptimInstanceBatchMultiCrit$new( objective = OBJ_2D_2D, search_space = PS_2D, terminator = trm("evals", n_evals = 10), diff --git a/tests/testthat/test_OptimInstanceAsyncSingleCrit.R b/tests/testthat/test_OptimInstanceAsyncSingleCrit.R new file mode 100644 index 000000000..87216335a --- /dev/null +++ b/tests/testthat/test_OptimInstanceAsyncSingleCrit.R @@ -0,0 +1,61 @@ +test_that("initializing OptimInstanceAsyncSingleCrit works", { + skip_if(TRUE) + #skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush_plan(n_workers = 2) + + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 5L), + ) + + expect_r6(instance$archive, "ArchiveAsync") + expect_r6(instance$objective, "Objective") + expect_r6(instance$search_space, "ParamSet") + expect_r6(instance$terminator, "Terminator") + expect_r6(instance$rush, "Rush") + expect_null(instance$result) + + expect_rush_reset(instance$rush) +}) + +test_that("rush controller can be passed to OptimInstanceAsyncSingleCrit", { + skip_if(TRUE) + #skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush = rsh(network_id = "remote_network") + + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 5L), + rush = rush + ) + + expect_class(instance$rush, "Rush") + expect_equal(instance$rush$network_id, "remote_network") +}) + +test_that("context is initialized correctly", { + skip_if(TRUE) + #skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 5L), + ) + + optimizer = opt("async_random_search") + optimizer$optimize(instance) + + expect_r6(instance$objective$context, "ContextAsync") +}) diff --git a/tests/testthat/test_OptimInstanceMultiCrit.R b/tests/testthat/test_OptimInstanceBatchMultiCrit.R similarity index 77% rename from tests/testthat/test_OptimInstanceMultiCrit.R rename to tests/testthat/test_OptimInstanceBatchMultiCrit.R index c109ec901..04f914cc0 100644 --- a/tests/testthat/test_OptimInstanceMultiCrit.R +++ b/tests/testthat/test_OptimInstanceBatchMultiCrit.R @@ -1,6 +1,6 @@ -test_that("OptimInstanceMultiCrit", { +test_that("OptimInstanceBatchMultiCrit", { inst = 
MAKE_INST_2D_2D(20L) - expect_output(print(inst), "OptimInstanceMultiCrit") + expect_output(print(inst), "OptimInstanceBatchMultiCrit") expect_r6(inst$archive, "Archive") expect_data_table(inst$archive$data, nrows = 0L) expect_identical(inst$archive$n_evals, 0L) @@ -24,10 +24,10 @@ test_that("OptimInstanceMultiCrit", { expect_equal(inst$result_x_domain, replicate(n = 2, list(x1 = 0, x2 = 0), simplify = FALSE)) }) -test_that("OptimInstanceMultiCrit with 1 Crit", { +test_that("OptimInstanceBatchMultiCrit with 1 Crit", { tt = trm("evals", n_evals = 5) - inst = OptimInstanceMultiCrit$new(objective = OBJ_2D, search_space = PS_2D, terminator = tt) - optimizer = OptimizerRandomSearch$new() + inst = OptimInstanceBatchMultiCrit$new(objective = OBJ_2D, search_space = PS_2D, terminator = tt) + optimizer = OptimizerBatchRandomSearch$new() optimizer$optimize(inst) expect_data_table(inst$result_y, ncols = 1) expect_data_table(inst$result_x_search_space) @@ -45,7 +45,7 @@ test_that("objective_function works", { expect_equal(y, c(y1 = 1, y2 = 1)) }) -test_that("OptimInstanceMultiCrit works with empty search space", { +test_that("OptimInstanceBatchMultiCrit works with empty search space", { fun = function(xs) { c(y = 10 + sample(c(0, 1), 1), z = 20 + sample(c(0, 1), 1)) } @@ -57,12 +57,12 @@ test_that("OptimInstanceMultiCrit works with empty search space", { expect_numeric(objective$eval(list())) # instance - instance = OptimInstanceMultiCrit$new(objective, terminator = trm("evals", n_evals = 20)) + instance = OptimInstanceBatchMultiCrit$new(objective, terminator = trm("evals", n_evals = 20)) instance$eval_batch(data.table()) expect_data_table(instance$archive$data, nrows = 1) # optimizer lenght(y) > 1 - instance = OptimInstanceMultiCrit$new(objective, terminator = trm("evals", n_evals = 20)) + instance = OptimInstanceBatchMultiCrit$new(objective, terminator = trm("evals", n_evals = 20)) optimizer = opt("random_search") optimizer$optimize(instance) expect_data_table(instance$archive$data, nrows = 20) @@ -70,7 +70,7 @@ test_that("OptimInstanceMultiCrit works with empty search space", { # optimizer lenght(y) == 1 - instance = OptimInstanceMultiCrit$new(objective, terminator = trm("evals", n_evals = 1)) + instance = OptimInstanceBatchMultiCrit$new(objective, terminator = trm("evals", n_evals = 1)) optimizer = opt("random_search") optimizer$optimize(instance) diff --git a/tests/testthat/test_OptimInstanceSingleCrit.R b/tests/testthat/test_OptimInstanceBatchSingleCrit.R similarity index 83% rename from tests/testthat/test_OptimInstanceSingleCrit.R rename to tests/testthat/test_OptimInstanceBatchSingleCrit.R index 6aaa0a714..f7bf26d31 100644 --- a/tests/testthat/test_OptimInstanceSingleCrit.R +++ b/tests/testthat/test_OptimInstanceBatchSingleCrit.R @@ -1,6 +1,6 @@ -test_that("OptimInstanceSingleCrit", { +test_that("OptimInstanceBatchSingleCrit", { inst = MAKE_INST_2D(20L) - expect_r6(inst$archive, "Archive") + expect_r6(inst$archive, "ArchiveBatch") expect_data_table(inst$archive$data, nrows = 0L) expect_identical(inst$archive$n_evals, 0L) expect_identical(inst$archive$n_batch, 0L) @@ -36,7 +36,7 @@ test_that("OptimInstance works with trafos", { expect_data_table(inst$archive$data, nrows = 3L) expect_equal(inst$archive$data$y, c(2, 0, 2)) expect_equal(inst$archive$data$x_domain[[1]], list(x1 = -1, x2 = -1)) - expect_output(print(inst), "") + expect_output(print(inst), "") }) test_that("OptimInstance works with extras input", { @@ -110,16 +110,16 @@ test_that("objective_function works", { }) 
test_that("search_space is optional", { - inst = OptimInstanceSingleCrit$new(objective = OBJ_1D, terminator = TerminatorEvals$new()) + inst = OptimInstanceBatchSingleCrit$new(objective = OBJ_1D, terminator = TerminatorEvals$new()) expect_identical(inst$search_space, OBJ_1D$domain) }) test_that("OptimInstaceSingleCrit does not work with codomain > 1", { - expect_error(OptimInstanceSingleCrit$new(objective = OBJ_2D_2D, + expect_error(OptimInstanceBatchSingleCrit$new(objective = OBJ_2D_2D, terminator = trm("none")), "Codomain > 1") }) -test_that("OptimInstanceSingleCrit$eval_batch() throws and error if columns are missing", { +test_that("OptimInstanceBatchSingleCrit$eval_batch() throws and error if columns are missing", { inst = MAKE_INST_2D(20L) expect_error(inst$eval_batch(data.table(x1 = 0)), regexp = "include the elements", @@ -143,7 +143,7 @@ test_that("domain, search_space and TuneToken work", { ) # only domain - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, terminator = trm("none") ) @@ -155,7 +155,7 @@ test_that("domain, search_space and TuneToken work", { x1 = p_dbl(-10, 10) ) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, terminator = trm("none"), search_space = search_space @@ -171,7 +171,7 @@ test_that("domain, search_space and TuneToken work", { codomain = codomain ) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, terminator = trm("none"), ) @@ -179,11 +179,11 @@ test_that("domain, search_space and TuneToken work", { expect_equal(domain$search_space(), instance$search_space) # TuneToken and search_space - expect_error(OptimInstanceSingleCrit$new(objective = objective, terminator = trm("none"), search_space = search_space), + expect_error(OptimInstanceBatchSingleCrit$new(objective = objective, terminator = trm("none"), search_space = search_space), regexp = "If the domain contains TuneTokens, you cannot supply a search_space") }) -test_that("OptimInstanceSingleCrit works with empty search space", { +test_that("OptimInstanceBatchSingleCrit works with empty search space", { fun = function(xs) { c(y = 10 + rnorm(1)) } @@ -195,12 +195,12 @@ test_that("OptimInstanceSingleCrit works with empty search space", { expect_numeric(objective$eval(list())) # instance - instance = OptimInstanceSingleCrit$new(objective, terminator = trm("evals", n_evals = 20)) + instance = OptimInstanceBatchSingleCrit$new(objective, terminator = trm("evals", n_evals = 20)) instance$eval_batch(data.table()) expect_data_table(instance$archive$data, nrows = 1) # optimizer - instance = OptimInstanceSingleCrit$new(objective, terminator = trm("evals", n_evals = 20)) + instance = OptimInstanceBatchSingleCrit$new(objective, terminator = trm("evals", n_evals = 20)) optimizer = opt("random_search") optimizer$optimize(instance) expect_data_table(instance$archive$data, nrows = 20) @@ -226,3 +226,20 @@ test_that("$clear() method works", { inst$clear() expect_equal(inst, inst_copy) }) + +test_that("context is initialized correctly", { + inst = MAKE_INST_2D(20L) + optimizer = opt("random_search") + optimizer$optimize(inst) + expect_r6(inst$objective$context, "ContextBatch") +}) + +test_that("context deep clone", { + inst = MAKE_INST_2D(20L) + optimizer = opt("random_search") + optimizer$optimize(inst) + expect_r6(inst$objective$context, "ContextBatch") + + inst_copy = inst$clone(deep = TRUE) + expect_null(inst_copy$objective$context) +}) 
diff --git a/tests/testthat/test_OptimizerAsyncDesignPoints.R b/tests/testthat/test_OptimizerAsyncDesignPoints.R new file mode 100644 index 000000000..fb9f12f90 --- /dev/null +++ b/tests/testthat/test_OptimizerAsyncDesignPoints.R @@ -0,0 +1,22 @@ +test_that("OptimizerAsyncDesignPoints works", { + skip_if(TRUE) + #skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + design = data.table(x1 = c(0.1, 0.2), x2 = c(0.3, 0.4)) + optimizer = opt("async_design_points", design = design) + expect_class(optimizer, "OptimizerAsync") + + rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("none"), + ) + + expect_data_table(optimizer$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, nrows = 2) + + expect_rush_reset(instance$rush) +}) diff --git a/tests/testthat/test_OptimizerAsyncGridSearch.R b/tests/testthat/test_OptimizerAsyncGridSearch.R new file mode 100644 index 000000000..1776cdd25 --- /dev/null +++ b/tests/testthat/test_OptimizerAsyncGridSearch.R @@ -0,0 +1,21 @@ +test_that("OptimizerAsyncGridSearch works", { + skip_if(TRUE) + #skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + optimizer = opt("async_grid_search") + expect_class(optimizer, "OptimizerAsync") + + rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("none"), + ) + + expect_data_table(optimizer$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, nrows = 100) + + expect_rush_reset(instance$rush) +}) diff --git a/tests/testthat/test_OptimizerAsync.R b/tests/testthat/test_OptimizerAsync.R new file mode 100644 index 000000000..1b586ab71 --- /dev/null +++ b/tests/testthat/test_OptimizerAsync.R @@ -0,0 +1,69 @@ +test_that("OptimizerAsync starts local workers", { + skip_if(TRUE) + #skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush_plan(n_workers = 2) + + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 5L), + ) + optimizer = opt("async_random_search") + optimizer$optimize(instance) + + expect_data_table(instance$rush$worker_info, nrows = 2) + + expect_rush_reset(instance$rush) +}) + +test_that("OptimizerAsync assigns result", { + skip_if(TRUE) + #skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush_plan(n_workers = 2) + + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 5L), + ) + optimizer = opt("async_random_search") + optimizer$optimize(instance) + + expect_data_table(instance$result, nrows = 1) + + expect_rush_reset(instance$rush) +}) + +test_that("OptimizerAsync throws an error when all workers are lost", { + skip_if(TRUE) + #skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush_plan(n_workers = 2) + + objective = ObjectiveRFun$new( + fun = function(xs) { + stop("Error") + }, + domain = PS_2D_domain, + properties = "single-crit" + ) + + instance = oi_async( + objective = objective, + search_space = PS_2D, + terminator = trm("evals", n_evals = 5L), + ) + optimizer = opt("async_random_search") + + expect_error(optimizer$optimize(instance), "All workers have crashed.") + + expect_rush_reset(instance$rush) +}) diff --git a/tests/testthat/test_OptimizerAsyncRandomSearch.R b/tests/testthat/test_OptimizerAsyncRandomSearch.R new file mode 100644 index 000000000..eeaa654a4 --- /dev/null +++ b/tests/testthat/test_OptimizerAsyncRandomSearch.R @@ -0,0 +1,21 @@ 
+test_that("OptimizerAsyncRandomSearch works", { + skip_if(TRUE) + #skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + optimizer = opt("async_random_search") + expect_class(optimizer, "OptimizerAsync") + + rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 5L), + ) + + expect_data_table(optimizer$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, min.rows = 5) + + expect_rush_reset(instance$rush) +}) diff --git a/tests/testthat/test_OptimizerCmaes.R b/tests/testthat/test_OptimizerCmaes.R index f75d57c30..f29e81f79 100644 --- a/tests/testthat/test_OptimizerCmaes.R +++ b/tests/testthat/test_OptimizerCmaes.R @@ -1,4 +1,4 @@ -test_that("OptimizerCmaes", { +test_that("OptimizerBatchCmaes", { skip_if_not_installed("adagio") search_space = domain = ps( @@ -17,15 +17,15 @@ test_that("OptimizerCmaes", { domain = domain, codomain = codomain) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 10)) z = test_optimizer(instance, "cmaes", real_evals = 10L) - expect_class(z$optimizer, "OptimizerCmaes") - expect_output(print(z$optimizer), "OptimizerCmaes") + expect_class(z$optimizer, "OptimizerBatchCmaes") + expect_output(print(z$optimizer), "OptimizerBatchCmaes") expect_error(test_optimizer_2d("cmaes", term_evals = 10L), "multi-crit objectives") }) diff --git a/tests/testthat/test_OptimizerDesignPoints.R b/tests/testthat/test_OptimizerDesignPoints.R index c2f58b354..d04abecec 100644 --- a/tests/testthat/test_OptimizerDesignPoints.R +++ b/tests/testthat/test_OptimizerDesignPoints.R @@ -1,13 +1,13 @@ -test_that("OptimizerDesignPoints", { +test_that("OptimizerBatchDesignPoints", { design = data.table(x = c(-1, 0, 1)) z = test_optimizer_1d("design_points", design = design, term_evals = 10L, real_evals = 3) - expect_class(z$optimizer, "OptimizerDesignPoints") - expect_output(print(z$optimizer), "OptimizerDesignPoints") + expect_class(z$optimizer, "OptimizerBatchDesignPoints") + expect_output(print(z$optimizer), "OptimizerBatchDesignPoints") design = data.table(x1 = c(-1, 0, 1), x2 = c(-1, 0, 1)) z = test_optimizer_2d("design_points", design = design, term_evals = 10L, real_evals = 3) - expect_class(z$optimizer, "OptimizerDesignPoints") - expect_output(print(z$optimizer), "OptimizerDesignPoints") + expect_class(z$optimizer, "OptimizerBatchDesignPoints") + expect_output(print(z$optimizer), "OptimizerBatchDesignPoints") expect_error(test_optimizer_1d("design_points", term_evals = 10L, real_evals = 3), "Please set design datatable") diff --git a/tests/testthat/test_OptimizerFocusSearch.R b/tests/testthat/test_OptimizerFocusSearch.R index da8901913..f31cf5b24 100644 --- a/tests/testthat/test_OptimizerFocusSearch.R +++ b/tests/testthat/test_OptimizerFocusSearch.R @@ -1,11 +1,11 @@ -test_that("OptimizerFocusSearch", { +test_that("OptimizerBatchFocusSearch", { z = test_optimizer_1d("focus_search", n_points = 1L, maxit = 10L, term_evals = 10L) - expect_class(z$optimizer, "OptimizerFocusSearch") - expect_output(print(z$optimizer), "OptimizerFocusSearch") + expect_class(z$optimizer, "OptimizerBatchFocusSearch") + expect_output(print(z$optimizer), "OptimizerBatchFocusSearch") z = test_optimizer_1d("focus_search", n_points = 10L, maxit = 10L, term_evals = 100L) - expect_class(z$optimizer, "OptimizerFocusSearch") - expect_output(print(z$optimizer), "OptimizerFocusSearch") + 
expect_class(z$optimizer, "OptimizerBatchFocusSearch") + expect_output(print(z$optimizer), "OptimizerBatchFocusSearch") z = test_optimizer_dependencies("focus_search", n_points = 1L, maxit = 10L, term_evals = 10L) diff --git a/tests/testthat/test_OptimizerGenSA.R b/tests/testthat/test_OptimizerGenSA.R index adf53e0c0..b06ce8920 100644 --- a/tests/testthat/test_OptimizerGenSA.R +++ b/tests/testthat/test_OptimizerGenSA.R @@ -1,9 +1,9 @@ -test_that("OptimizerGenSA", { +test_that("OptimizerBatchGenSA", { skip_if_not_installed("GenSA") z = test_optimizer_1d("gensa", term_evals = 10L) - expect_class(z$optimizer, "OptimizerGenSA") - expect_output(print(z$optimizer), "OptimizerGenSA") + expect_class(z$optimizer, "OptimizerBatchGenSA") + expect_output(print(z$optimizer), "OptimizerBatchGenSA") expect_error(test_optimizer_2d("gensa", term_evals = 10L), "multi-crit objectives") }) diff --git a/tests/testthat/test_OptimizerGridSearch.R b/tests/testthat/test_OptimizerGridSearch.R index b7c85a69f..0eb15c43c 100644 --- a/tests/testthat/test_OptimizerGridSearch.R +++ b/tests/testthat/test_OptimizerGridSearch.R @@ -1,9 +1,9 @@ -test_that("OptimizerGridSearch", { +test_that("OptimizerBatchGridSearch", { z = test_optimizer_1d("grid_search", term_evals = 10L) - expect_class(z$optimizer, "OptimizerGridSearch") - expect_output(print(z$optimizer), "OptimizerGridSearch") + expect_class(z$optimizer, "OptimizerBatchGridSearch") + expect_output(print(z$optimizer), "OptimizerBatchGridSearch") z = test_optimizer_2d("grid_search", term_evals = 10L) - expect_class(z$optimizer, "OptimizerGridSearch") - expect_output(print(z$optimizer), "OptimizerGridSearch") + expect_class(z$optimizer, "OptimizerBatchGridSearch") + expect_output(print(z$optimizer), "OptimizerBatchGridSearch") }) diff --git a/tests/testthat/test_OptimizerIrace.R b/tests/testthat/test_OptimizerIrace.R index 3d3ab69ec..a7a480c22 100644 --- a/tests/testthat/test_OptimizerIrace.R +++ b/tests/testthat/test_OptimizerIrace.R @@ -1,4 +1,4 @@ -test_that("OptimizerIrace minimize works", { +test_that("OptimizerBatchIrace minimize works", { skip_if_not_installed("irace") search_space = domain = ps( @@ -12,7 +12,7 @@ test_that("OptimizerIrace minimize works", { objective = ObjectiveRFunDt$new(fun = fun, domain = domain) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 1000)) @@ -38,7 +38,7 @@ test_that("OptimizerIrace minimize works", { expect_equal(unname(instance$result_y), mean(archive[configuration == configuration_id, y])) }) -test_that("OptimizerIrace maximize works", { +test_that("OptimizerBatchIrace maximize works", { skip_if_not_installed("irace") search_space = domain = ps( @@ -53,7 +53,7 @@ test_that("OptimizerIrace maximize works", { codomain = ps(y = p_dbl(tags = "maximize")) objective = ObjectiveRFunDt$new(fun = fun, domain = domain, codomain = codomain) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 96)) @@ -78,7 +78,7 @@ test_that("OptimizerIrace maximize works", { expect_equal(unname(instance$result_y), mean(archive[configuration == configuration_id, y])) }) -test_that("OptimizerIrace assertions works", { +test_that("OptimizerBatchIrace assertions works", { skip_if_not_installed("irace") search_space = domain = ps( @@ -93,7 +93,7 @@ test_that("OptimizerIrace assertions works", { objective = 
ObjectiveRFunDt$new(fun = fun, domain = domain) # unsupported terminators - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("perf_reached", level = 0.1)) @@ -105,7 +105,7 @@ test_that("OptimizerIrace assertions works", { fixed = TRUE) }) -test_that("OptimizerIrace works with passed constants set", { +test_that("OptimizerBatchIrace works with passed constants set", { skip_if_not_installed("irace") search_space = domain = ps( @@ -119,7 +119,7 @@ test_that("OptimizerIrace works with passed constants set", { objective = ObjectiveRFunDt$new(fun = fun, domain = domain, constants = ps(instances = p_uty())) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 96)) @@ -130,7 +130,7 @@ test_that("OptimizerIrace works with passed constants set", { expect_data_table(instance$result, nrows = 1) }) -test_that("OptimizerIrace works without passed constants set", { +test_that("OptimizerBatchIrace works without passed constants set", { skip_if_not_installed("irace") search_space = domain = ps( @@ -144,7 +144,7 @@ test_that("OptimizerIrace works without passed constants set", { objective = ObjectiveRFunDt$new(fun = fun, domain = domain) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 96)) @@ -164,7 +164,7 @@ test_that("paradox_to_irace without dependencies", { # only ParamUty pps = ps(uty = p_uty()) - expect_error(paradox_to_irace(pps, 4), regexp = " not supported by ", fixed = TRUE) + expect_error(paradox_to_irace(pps, 4), regexp = " not supported by ", fixed = TRUE) # mixed set pps = ps( diff --git a/tests/testthat/test_OptimizerNLoptr.R b/tests/testthat/test_OptimizerNLoptr.R index fa8f6d572..f48642824 100644 --- a/tests/testthat/test_OptimizerNLoptr.R +++ b/tests/testthat/test_OptimizerNLoptr.R @@ -1,10 +1,10 @@ -test_that("OptimizerNLoptr", { +test_that("OptimizerBatchNLoptr", { skip_on_os("windows") skip_if_not_installed("nloptr") z = test_optimizer_1d("nloptr", algorithm = "NLOPT_LN_BOBYQA", xtol_rel = -1, xtol_abs = -1, ftol_rel = -1, ftol_abs = -1, term_evals = 5L) - expect_class(z$optimizer, "OptimizerNLoptr") - expect_output(print(z$optimizer), "OptimizerNLoptr") + expect_class(z$optimizer, "OptimizerBatchNLoptr") + expect_output(print(z$optimizer), "OptimizerBatchNLoptr") }) diff --git a/tests/testthat/test_OptimizerRandomSearch.R b/tests/testthat/test_OptimizerRandomSearch.R index 45a398d6a..01c8ece60 100644 --- a/tests/testthat/test_OptimizerRandomSearch.R +++ b/tests/testthat/test_OptimizerRandomSearch.R @@ -1,15 +1,15 @@ -test_that("OptimizerRandomSearch", { +test_that("OptimizerBatchRandomSearch", { z = test_optimizer_1d("random_search", term_evals = 10L) - expect_class(z$optimizer, "OptimizerRandomSearch") - expect_output(print(z$optimizer), "OptimizerRandomSearch") + expect_class(z$optimizer, "OptimizerBatchRandomSearch") + expect_output(print(z$optimizer), "OptimizerBatchRandomSearch") z = test_optimizer_2d("random_search", term_evals = 10L) - expect_class(z$optimizer, "OptimizerRandomSearch") - expect_output(print(z$optimizer), "OptimizerRandomSearch") + expect_class(z$optimizer, "OptimizerBatchRandomSearch") + expect_output(print(z$optimizer), "OptimizerBatchRandomSearch") z = test_optimizer_2d("random_search", term_evals = 10L, 
batch_size = 10) - expect_class(z$optimizer, "OptimizerRandomSearch") - expect_output(print(z$optimizer), "OptimizerRandomSearch") + expect_class(z$optimizer, "OptimizerBatchRandomSearch") + expect_output(print(z$optimizer), "OptimizerBatchRandomSearch") z = test_optimizer_dependencies("random_search", term_evals = 10L, batch_size = 1) diff --git a/tests/testthat/test_TerminatorClockTime.R b/tests/testthat/test_TerminatorClockTime.R index 4aa0d1ad9..e1f7e65f0 100644 --- a/tests/testthat/test_TerminatorClockTime.R +++ b/tests/testthat/test_TerminatorClockTime.R @@ -23,7 +23,7 @@ test_that("status method works", { test_that("TerminatorClockTime works with empty archive", { terminator = TerminatorClockTime$new() terminator$param_set$values$stop_time = Sys.time() + 2L - archive = Archive$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) + archive = ArchiveBatch$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) expect_false(terminator$is_terminated(archive)) }) diff --git a/tests/testthat/test_TerminatorEvals.R b/tests/testthat/test_TerminatorEvals.R index 8473c125e..9df9b56ca 100644 --- a/tests/testthat/test_TerminatorEvals.R +++ b/tests/testthat/test_TerminatorEvals.R @@ -78,7 +78,7 @@ test_that("status method works", { test_that("TerminatorEvals works with empty archive", { terminator = TerminatorEvals$new() - archive = Archive$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) + archive = ArchiveBatch$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) expect_false(terminator$is_terminated(archive)) }) diff --git a/tests/testthat/test_TerminatorNone.R b/tests/testthat/test_TerminatorNone.R index 36db356e9..e33767301 100644 --- a/tests/testthat/test_TerminatorNone.R +++ b/tests/testthat/test_TerminatorNone.R @@ -11,7 +11,7 @@ test_that("TerminatorNone works", { test_that("TerminatorNone works with empty archive", { terminator = TerminatorNone$new() - archive = Archive$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) + archive = ArchiveBatch$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) expect_false(terminator$is_terminated(archive)) }) diff --git a/tests/testthat/test_TerminatorPerfReached.R b/tests/testthat/test_TerminatorPerfReached.R index 81e37a445..9454e27af 100644 --- a/tests/testthat/test_TerminatorPerfReached.R +++ b/tests/testthat/test_TerminatorPerfReached.R @@ -8,14 +8,14 @@ test_that("TerminatorPerfReached works", { expect_true(tail(a$data$y, 1) < 0.2) }) -test_that("TerminatorPerfReached in OptimInstanceMultiCrit throws an error", { +test_that("TerminatorPerfReached in OptimInstanceBatchMultiCrit throws an error", { terminator = TerminatorPerfReached$new() expect_error(MAKE_INST_2D_2D(terminator)) }) test_that("TerminatorPerfReached works with empty archive", { terminator = TerminatorPerfReached$new() - archive = Archive$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) + archive = ArchiveBatch$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) expect_false(terminator$is_terminated(archive)) }) diff --git a/tests/testthat/test_TerminatorRunTime.R b/tests/testthat/test_TerminatorRunTime.R index 0cdd8e3e4..f73ba4c2f 100644 --- a/tests/testthat/test_TerminatorRunTime.R +++ b/tests/testthat/test_TerminatorRunTime.R @@ -23,7 +23,7 @@ test_that("max and current works", { test_that("TerminatorRunTime works with empty archive", { terminator = TerminatorRunTime$new() - archive = Archive$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) + archive = ArchiveBatch$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) 
expect_false(terminator$is_terminated(archive)) }) diff --git a/tests/testthat/test_TerminatorStagnation.R b/tests/testthat/test_TerminatorStagnation.R index 98d8b8005..232d181eb 100644 --- a/tests/testthat/test_TerminatorStagnation.R +++ b/tests/testthat/test_TerminatorStagnation.R @@ -8,14 +8,14 @@ test_that("TerminatorStagnation works", { expect_equal(a$n_evals, 11) }) -test_that("TerminatorStagnation in OptimInstanceMultiCrit throws an error", { +test_that("TerminatorStagnation in OptimInstanceBatchMultiCrit throws an error", { terminator = TerminatorStagnation$new() expect_error(MAKE_INST_2D_2D(terminator)) }) test_that("TerminatorStagnation works with empty archive", { terminator = TerminatorStagnation$new() - archive = Archive$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) + archive = ArchiveBatch$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) expect_false(terminator$is_terminated(archive)) }) diff --git a/tests/testthat/test_TerminatorStagnationBatch.R b/tests/testthat/test_TerminatorStagnationBatch.R index bcfffd19b..26d3dc9d1 100644 --- a/tests/testthat/test_TerminatorStagnationBatch.R +++ b/tests/testthat/test_TerminatorStagnationBatch.R @@ -30,14 +30,14 @@ test_that("TerminatorStagnationBatch works with single objective and n = 2", { expect_error(inst$eval_batch(xdt = data.table(x1 = 0, x2 = 0))) }) -test_that("TerminatorStagnationBatch in OptimInstanceMultiCrit throws an error", { +test_that("TerminatorStagnationBatch in OptimInstanceBatchMultiCrit throws an error", { terminator = TerminatorStagnationBatch$new() expect_error(MAKE_INST_2D_2D(terminator)) }) test_that("TerminatorStagnationBatch works with empty archive", { terminator = TerminatorStagnationBatch$new() - archive = Archive$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) + archive = ArchiveBatch$new(ps(x = p_dbl()), ps(y = p_dbl(tags = "minimize"))) expect_false(terminator$is_terminated(archive)) }) diff --git a/tests/testthat/test_bb_optimize.R b/tests/testthat/test_bb_optimize.R index 0dd53f8f9..f3ae1642b 100644 --- a/tests/testthat/test_bb_optimize.R +++ b/tests/testthat/test_bb_optimize.R @@ -12,7 +12,7 @@ test_that("bb_optimize works with function and bounds", { expect_named(res$par, c("x1", "x2")) expect_numeric(res$value) expect_named(res$value, "y1") - expect_r6(res$instance, "OptimInstanceSingleCrit") + expect_r6(res$instance, "OptimInstanceBatchSingleCrit") }) test_that("bb_optimize works with passed arguments to objective function", { @@ -29,7 +29,7 @@ test_that("bb_optimize works with passed arguments to objective function", { expect_named(res$par, c("x1", "x2")) expect_numeric(res$value) expect_named(res$value, "y1") - expect_r6(res$instance, "OptimInstanceSingleCrit") + expect_r6(res$instance, "OptimInstanceBatchSingleCrit") }) test_that("bb_optimize works with optimizer object", { @@ -46,7 +46,7 @@ test_that("bb_optimize works with optimizer object", { expect_named(res$par, c("x1", "x2")) expect_numeric(res$value) expect_named(res$value, "y1") - expect_r6(res$instance, "OptimInstanceSingleCrit") + expect_r6(res$instance, "OptimInstanceBatchSingleCrit") }) test_that("bb_optimize works with function and named bounds", { @@ -63,7 +63,7 @@ test_that("bb_optimize works with function and named bounds", { expect_named(res$par, c("z1", "z2")) expect_numeric(res$value) expect_named(res$value, "y1") - expect_r6(res$instance, "OptimInstanceSingleCrit") + expect_r6(res$instance, "OptimInstanceBatchSingleCrit") }) test_that("bb_optimize works with named codomain", { @@ -80,7 +80,7 @@ 
test_that("bb_optimize works with named codomain", { expect_named(res$par, c("x1", "x2")) expect_numeric(res$value) expect_named(res$value, "z") - expect_r6(res$instance, "OptimInstanceSingleCrit") + expect_r6(res$instance, "OptimInstanceBatchSingleCrit") }) test_that("bb_optimize works with objective", { @@ -100,7 +100,7 @@ test_that("bb_optimize works with objective", { expect_named(res$par, c("x1", "x2")) expect_numeric(res$value) expect_named(res$value, "z") - expect_r6(res$instance, "OptimInstanceSingleCrit") + expect_r6(res$instance, "OptimInstanceBatchSingleCrit") }) @@ -119,7 +119,7 @@ test_that("bb_optimize works with function and bounds", { expect_named(res$par, c("x1", "x2")) expect_numeric(res$value) expect_named(res$value, "y1") - expect_r6(res$instance, "OptimInstanceSingleCrit") + expect_r6(res$instance, "OptimInstanceBatchSingleCrit") }) @@ -144,5 +144,5 @@ test_that("bb_optimize works with objective", { expect_named(res$par, c("x1", "x2")) expect_numeric(res$value) expect_named(res$value, "z") - expect_r6(res$instance, "OptimInstanceSingleCrit") + expect_r6(res$instance, "OptimInstanceBatchSingleCrit") }) diff --git a/tests/testthat/test_mlr_callbacks.R b/tests/testthat/test_mlr_callbacks.R index 413f47cdb..689dd7ab5 100644 --- a/tests/testthat/test_mlr_callbacks.R +++ b/tests/testthat/test_mlr_callbacks.R @@ -1,7 +1,7 @@ -test_that("backup callback works", { +test_that("backup batch callback works", { on.exit(unlink("./archive.rds")) - instance = OptimInstanceSingleCrit$new( + instance = OptimInstanceBatchSingleCrit$new( objective = OBJ_1D, search_space = PS_1D, terminator = trm("evals", n_evals = 10), @@ -14,3 +14,40 @@ test_that("backup callback works", { expect_file_exists("./archive.rds") expect_data_table(readRDS("./archive.rds")) }) + +test_that("async callback works", { + skip_if(TRUE) # Does not work in testthat environment + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush_plan(n_workers = 2) + + callback = callback_async("bbotk.test", + on_worker_begin = function(callback, context) { + key = context$instance$archive$push_running_point(list(x = 1)) + context$instance$archive$push_result(key, list(y = 1), list(x = 1)) + }, + + on_worker_end = function(callback, context) { + key = context$instance$archive$push_running_point(list(x = 2)) + context$instance$archive$push_result(key, list(y = 2), list(x = 2)) + } + ) + + instance = oi_async( + objective = OBJ_1D, + search_space = PS_1D, + terminator = trm("evals", n_evals = 10), + callbacks = callback, + ) + + optimizer = opt("async_random_search") + optimizer$optimize(instance) + + Sys.sleep(1) + + x = instance$archive$data$x + expect_equal(head(x, 2), c(1, 1)) + # expect_equal(tail(x, 2), c(2, 2)) +}) diff --git a/tests/testthat/test_mlr_optimizers.R b/tests/testthat/test_mlr_optimizers.R index 0b0a8818d..46242863f 100644 --- a/tests/testthat/test_mlr_optimizers.R +++ b/tests/testthat/test_mlr_optimizers.R @@ -4,7 +4,7 @@ test_that("mlr_optimizers", { for (key in keys) { optimizer = opt(key) - expect_r6(optimizer, "Optimizer") + expect_multi_class(optimizer, c("Optimizer", "OptimizerAsync")) } }) @@ -16,5 +16,5 @@ test_that("mlr_optimizers sugar", { test_that("as.data.table objects parameter", { tab = as.data.table(mlr_optimizers, objects = TRUE) expect_data_table(tab) - expect_list(tab$object, "Optimizer", any.missing = FALSE) + expect_list(tab$object, any.missing = FALSE) }) diff --git a/tests/testthat/test_nds_selection.R b/tests/testthat/test_nds_selection.R index a31d44fed..3c9ab6502 
100644 --- a/tests/testthat/test_nds_selection.R +++ b/tests/testthat/test_nds_selection.R @@ -82,7 +82,7 @@ test_that("nds_selection in Archive works", { y1 = p_dbl(tags = "minimize"), y2 = p_dbl(tags = "minimize") ) - a = Archive$new(domain, codomain) + a = ArchiveBatch$new(domain, codomain) # misuse x1 to identify the points xdt = data.table(x1 = seq_len(8)) diff --git a/vignettes/bbotk.Rmd b/vignettes/bbotk.Rmd index 173901ecd..966f8e863 100644 --- a/vignettes/bbotk.Rmd +++ b/vignettes/bbotk.Rmd @@ -114,7 +114,7 @@ term_combo = TerminatorCombo$new(terminators = terminators) Before we finally start the optimization, we have to create an `OptimInstance` that contains also the `Objective` and the `Terminator`. ```{r} -instance = OptimInstanceSingleCrit$new(objective = obfun, terminator = term_combo) +instance = OptimInstanceBatchSingleCrit$new(objective = obfun, terminator = term_combo) instance ``` @@ -207,7 +207,7 @@ search_space = ps( Instead of the domain of the `Objective` we now use our constructed `search_space` that includes the `trafo` for the `OptimInstance`. ```{r} -inst = OptimInstanceSingleCrit$new( +inst = OptimInstanceBatchSingleCrit$new( objective = obj, search_space = search_space, terminator = trm("evals", n_evals = 30)