From 092985d0c6a0883ec5cdd2826d50ed38dbeea1e3 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 00:20:04 +0100 Subject: [PATCH 001/117] Move ensemble_bulder test data to named folder --- .../.auto-sklearn/predictions_ensemble_true.npy | Bin 160 -> 0 bytes .../runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy | Bin 160 -> 0 bytes .../runs/0_1_0.0/predictions_test_0_1_0.0.npy | Bin 160 -> 0 bytes .../runs/0_1_0.0/predictions_valid_0_1_0.0.npy | Bin 160 -> 0 bytes .../runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy | Bin 160 -> 0 bytes .../runs/0_2_0.0/predictions_test_0_2_0.0.npy | Bin 160 -> 0 bytes .../runs/0_2_0.0/predictions_valid_0_2_0.0.npy | Bin 160 -> 0 bytes .../0_3_100.0/predictions_ensemble_0_3_100.0.npy | Bin 160 -> 0 bytes .../runs/0_3_100.0/predictions_test_0_3_100.0.npy | Bin 160 -> 0 bytes .../0_3_100.0/predictions_valid_0_3_100.0.npy | Bin 160 -> 0 bytes .../.auto-sklearn/runs/0_1_0.0/0.1.0.0.model | 0 .../.auto-sklearn/runs/0_2_0.0/0.2.0.0.model | 0 .../runs/0_2_0.0/predictions_test_0_2_0.0.np | Bin .../.auto-sklearn/runs/0_3_100.0/0.3.0.0.model | 0 .../.auto-sklearn/runs/0_3_100.0/0.3.100.0.model | 0 15 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 test/test_ensemble_builder/data/.auto-sklearn/predictions_ensemble_true.npy delete mode 100644 test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy delete mode 100644 test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/predictions_test_0_1_0.0.npy delete mode 100644 test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/predictions_valid_0_1_0.0.npy delete mode 100644 test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy delete mode 100644 test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.npy delete mode 100644 test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_valid_0_2_0.0.npy delete mode 100644 test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy delete mode 100644 test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/predictions_test_0_3_100.0.npy delete mode 100644 test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/predictions_valid_0_3_100.0.npy rename test/test_ensemble_builder/{data => toy_data}/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model (100%) rename test/test_ensemble_builder/{data => toy_data}/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model (100%) rename test/test_ensemble_builder/{data => toy_data}/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np (100%) rename test/test_ensemble_builder/{data => toy_data}/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model (100%) rename test/test_ensemble_builder/{data => toy_data}/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model (100%) diff --git a/test/test_ensemble_builder/data/.auto-sklearn/predictions_ensemble_true.npy b/test/test_ensemble_builder/data/.auto-sklearn/predictions_ensemble_true.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy b/test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy deleted file mode 100644 index 
1b2320113d4ffe309dff0f30b4adb5c434b84d52..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= eXCxM+0{I%IItoUbItsN4aKOa?1&lBTg?s>2!WBvY diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/predictions_test_0_1_0.0.npy b/test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/predictions_test_0_1_0.0.npy deleted file mode 100644 index 1b2320113d4ffe309dff0f30b4adb5c434b84d52..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= eXCxM+0{I%IItoUbItsN4aKOa?1&lBTg?s>2!WBvY diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/predictions_valid_0_1_0.0.npy b/test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/predictions_valid_0_1_0.0.npy deleted file mode 100644 index 1b2320113d4ffe309dff0f30b4adb5c434b84d52..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= eXCxM+0{I%IItoUbItsN4aKOa?1&lBTg?s>2!WBvY diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy b/test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.npy b/test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_valid_0_2_0.0.npy b/test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_valid_0_2_0.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy b/test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/predictions_test_0_3_100.0.npy b/test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/predictions_test_0_3_100.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git 
a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/predictions_valid_0_3_100.0.npy b/test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/predictions_valid_0_3_100.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model b/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model similarity index 100% rename from test/test_ensemble_builder/data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model rename to test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model b/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model similarity index 100% rename from test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model rename to test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np b/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np similarity index 100% rename from test/test_ensemble_builder/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np rename to test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model b/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model similarity index 100% rename from test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model rename to test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model diff --git a/test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model b/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model similarity index 100% rename from test/test_ensemble_builder/data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model rename to test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model From 83db9cf921bc09155f2e5b1b8e9f5a4248cd3296 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 00:21:04 +0100 Subject: [PATCH 002/117] Update backend to take a temlate to copy from --- test/fixtures/backend.py | 21 ++++++++-- test/fixtures/datasets.py | 10 ++++- test/test_ensemble_builder/cases.py | 63 +++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 5 deletions(-) create mode 100644 test/test_ensemble_builder/cases.py diff --git a/test/fixtures/backend.py b/test/fixtures/backend.py index 3ee4626199..5557b34a96 100644 --- a/test/fixtures/backend.py +++ b/test/fixtures/backend.py @@ -1,4 +1,5 @@ -from typing import Callable, Union +from typing import Callable, Union, Optional +from distutils.dir_util import copy_tree import os from pathlib import Path @@ -7,6 +8,9 @@ from pytest import fixture +HERE = Path(__file__).parent.resolve() +DATAPATH = HERE.parent / "data" + # TODO Update to return path once everything can use a path @fixture @@ -34,15 +38,21 @@ def make_backend() -> Callable[..., Backend]: path: Union[str, Path] The path to place the backend at + template: Optional[Path] = None + Setup with a pre-existing layout if not None + Returns ------- Backend The 
created backend object """ # TODO redo once things use paths - def _make(path: Union[str, Path]) -> Backend: + def _make( + path: Union[str, Path], + template: Optional[Path] = None, + ) -> Backend: _path = Path(path) if not isinstance(path, Path) else path - assert not _path.exists() + assert not _path.exists(), "Try passing path / 'backend'" backend = create( temporary_directory=str(_path), @@ -50,6 +60,11 @@ def _make(path: Union[str, Path]) -> Backend: prefix="auto-sklearn", ) + if template is not None: + assert template.exists() + dest = Path(backend.temporary_directory) + copy_tree(str(template), str(dest)) + return backend return _make diff --git a/test/fixtures/datasets.py b/test/fixtures/datasets.py index 39d948e5a9..d79a228c23 100644 --- a/test/fixtures/datasets.py +++ b/test/fixtures/datasets.py @@ -41,7 +41,6 @@ def astype( @fixture def make_sklearn_dataset() -> Callable: """ - Parameters ---------- name : str = "iris" @@ -62,6 +61,12 @@ def make_sklearn_dataset() -> Callable: make_binary : bool = False Whether to force the data into being binary + task: Optional[int] = None + The task of the data, required for the datamanager + + feat_type: Optional[Dict | str] = None + The features types for the data if making a XYDataManager + as_datamanager: bool = False Wether to return the information as an XYDataManager @@ -77,9 +82,9 @@ def _make( train_size_maximum: int = 150, make_multilabel: bool = False, make_binary: bool = False, - as_datamanager: bool = False, task: Optional[int] = None, feat_type: Optional[Dict | str] = None, + as_datamanager: bool = False, ) -> Any: X, y, Xt, yt = get_dataset( dataset=name, @@ -93,6 +98,7 @@ def _make( if not as_datamanager: return (X, y, Xt, yt) else: + assert task is not None and feat_type is not None if isinstance(feat_type, str): feat_type = {i: feat_type for i in range(X.shape[1])} diff --git a/test/test_ensemble_builder/cases.py b/test/test_ensemble_builder/cases.py new file mode 100644 index 0000000000..9761eed1f6 --- /dev/null +++ b/test/test_ensemble_builder/cases.py @@ -0,0 +1,63 @@ +from typing import Callable + +import pickle +from pathlib import Path + +import numpy as np + +from autosklearn.automl_common.common.utils.backend import Backend +from autosklearn.constants import BINARY_CLASSIFICATION +from autosklearn.data.xy_data_manager import XYDataManager + +from pytest_cases import case + +HERE = Path(__file__).parent.resolve() + + +@case(tags=["backend", "setup_3_models"]) +def case_backend_setup_3_models( + tmp_path: Path, + make_backend: Callable[..., Backend], + make_sklearn_dataset: Callable[..., XYDataManager], +) -> Backend: + """See the contents of TOY_DATA for full details + + /toy_data + /.auto-sklearn + /runs + /0_1_0.0 + /0_2_0.0 + /0_3_100.0 + /datamanger.pkl + /predictions_ensemble_targets.npy + /true_targets_ensemble.npy # Same file as predictions_ensemble_targets + """ + path = tmp_path / "backend" + TOY_DATA = HERE / "toy_data" + + # Create the datamanager that was used if needed + dm_path = TOY_DATA / ".auto-sklearn" / "datamanager.pkl" + + if not dm_path.exists(): + datamanager = make_sklearn_dataset( + name="breast_cancer", + task=BINARY_CLASSIFICATION, + feat_type="numerical", # They're all numerical + as_datamanager=True, + ) + + # For some reason, the old mock was just returning this array as: + # + # datamanger.data.get.return_value = array + # + model_3_path = TOY_DATA / ".auto-sklearn" / "runs" / "0_3_100.0" + test_preds = model_3_path / "predictions_test_0_3_100.0.npy" + array = np.load(test_preds) + + 
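        # Writing the same array to both "Y_valid" and "Y_test" below mirrors what
        # the old BackendMock.load_datamanager() mock returned for every
        # data.get() call. Because dm_path lives inside the toy_data template,
        # the pickled datamanager is built at most once and is then copied into
        # every backend created from this template.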
datamanager.data["Y_valid"] = array + datamanager.data["Y_test"] = array + + with dm_path.open("wb") as f: + pickle.dump(datamanager, f) + + return make_backend(path=path, template=TOY_DATA) From dc4585ebe9c05fd53ac489ff7c341ff8bc2c1905 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 00:21:17 +0100 Subject: [PATCH 003/117] Update tests to use new cases system --- autosklearn/ensemble_builder.py | 32 +- test/test_ensemble_builder/ensemble_utils.py | 116 ---- test/test_ensemble_builder/test_ensemble.py | 609 ++++++++++++------- 3 files changed, 398 insertions(+), 359 deletions(-) delete mode 100644 test/test_ensemble_builder/ensemble_utils.py diff --git a/autosklearn/ensemble_builder.py b/autosklearn/ensemble_builder.py index 3707ce84c9..c17c23a4e2 100644 --- a/autosklearn/ensemble_builder.py +++ b/autosklearn/ensemble_builder.py @@ -1,4 +1,4 @@ -# -*- encoding: utf-8 -*- +from __future__ import annotations from typing import List, Optional, Tuple, Union import glob @@ -52,13 +52,13 @@ def __init__( metric: Scorer, ensemble_size: int, ensemble_nbest: int, - max_models_on_disc: Union[float, int], seed: int, precision: int, max_iterations: Optional[int], read_at_most: int, ensemble_memory_limit: Optional[int], random_state: Union[int, np.random.RandomState], + max_models_on_disc: Optional[float | int] = 100, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, pynisher_context: str = "fork", ): @@ -95,7 +95,7 @@ def __init__( Both wrt to validation predictions If performance_range_threshold > 0, might return less models - max_models_on_disc: int + max_models_on_disc: Optional[int | float] = 100 Defines the maximum number of models that are kept in the disc. If int, it must be greater or equal than 1, and dictates the max @@ -287,7 +287,6 @@ def fit_and_return_ensemble( metric: Scorer, ensemble_size: int, ensemble_nbest: int, - max_models_on_disc: Union[float, int], seed: int, precision: int, read_at_most: int, @@ -295,6 +294,7 @@ def fit_and_return_ensemble( iteration: int, return_predictions: bool, pynisher_context: str, + max_models_on_disc: Optional[Union[float, int]] = 100, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, unit_test: bool = False, memory_limit: Optional[int] = None, @@ -334,7 +334,7 @@ def fit_and_return_ensemble( Both wrt to validation predictions If performance_range_threshold > 0, might return less models - max_models_on_disc: int + max_models_on_disc: Optional[int | float] = 100 Defines the maximum number of models that are kept in the disc. If int, it must be greater or equal than 1, and dictates the max number of @@ -422,7 +422,7 @@ def __init__( metric: Scorer, ensemble_size: int = 10, ensemble_nbest: Union[int, float] = 100, - max_models_on_disc: int = 100, + max_models_on_disc: Optional[int | float] = 100, performance_range_threshold: float = 0, seed: int = 1, precision: int = 32, @@ -452,7 +452,7 @@ def __init__( if float: consider only this fraction of the best models Both with respect to the validation predictions If performance_range_threshold > 0, might return less models - max_models_on_disc: int = 100 + max_models_on_disc: Optional[int | float] = 100 Defines the maximum number of models that are kept in the disc. If int, it must be greater or equal than 1, and dictates the max number of models to keep. @@ -486,7 +486,6 @@ def __init__( way to make unittest.mock work through the pynisher with all spawn contexts. If you know a better solution, please let us know by opening an issue. 
""" - super(EnsembleBuilder, self).__init__() self.backend = backend # communication with filesystem @@ -904,10 +903,20 @@ def get_disk_consumption(self, pred_path): # get the megabytes return round(this_model_cost / math.pow(1024, 2), 2) - def compute_loss_per_model(self): - """ - Compute the loss of the predictions on ensemble building data set; + def compute_loss_per_model(self) -> bool: + """Compute the loss of the predictions on ensemble building data set; populates self.read_preds and self.read_losses + + Side-effects + ------------ + * Populates + - `self.y_ens_files` all the ensemble predictions it could find for runs + - `self.read_losses` with the new losses it calculated + + Returns + ------- + bool + Whether it successfully computed losses """ self.logger.debug("Read ensemble data set predictions") @@ -1234,6 +1243,7 @@ def get_valid_test_preds( self, selected_keys: List[str] ) -> Tuple[List[str], List[str]]: """Get valid and test predictions from disc and store them in self.read_preds + Parameters --------- selected_keys: list diff --git a/test/test_ensemble_builder/ensemble_utils.py b/test/test_ensemble_builder/ensemble_utils.py deleted file mode 100644 index 7a3cd7f252..0000000000 --- a/test/test_ensemble_builder/ensemble_utils.py +++ /dev/null @@ -1,116 +0,0 @@ -import os -import shutil - -import numpy as np - -from autosklearn.automl_common.common.ensemble_building.abstract_ensemble import ( - AbstractEnsemble, -) -from autosklearn.ensemble_builder import EnsembleBuilder -from autosklearn.metrics import make_scorer - -import unittest -import unittest.mock - - -def scorer_function(a, b): - return 0.9 - - -MockMetric = make_scorer("mock", scorer_function) - - -class BackendMock(object): - def __init__(self, target_directory): - this_directory = os.path.abspath(os.path.dirname(__file__)) - shutil.copytree( - os.path.join(this_directory, "data"), os.path.join(target_directory) - ) - self.temporary_directory = target_directory - self.internals_directory = os.path.join( - self.temporary_directory, ".auto-sklearn" - ) - - def load_datamanager(self): - manager = unittest.mock.Mock() - manager.__reduce__ = lambda self: (unittest.mock.MagicMock, ()) - array = np.load( - os.path.join( - self.temporary_directory, - ".auto-sklearn", - "runs", - "0_3_100.0", - "predictions_test_0_3_100.0.npy", - ) - ) - manager.data.get.return_value = array - return manager - - def load_targets_ensemble(self): - with open( - os.path.join( - self.temporary_directory, - ".auto-sklearn", - "predictions_ensemble_true.npy", - ), - "rb", - ) as fp: - y = np.load(fp, allow_pickle=True) - return y - - def save_ensemble(self, ensemble, index_run, seed): - return - - def save_predictions_as_txt(self, predictions, subset, idx, prefix, precision): - return - - def get_runs_directory(self) -> str: - return os.path.join(self.temporary_directory, ".auto-sklearn", "runs") - - def get_numrun_directory(self, seed: int, num_run: int, budget: float) -> str: - return os.path.join( - self.get_runs_directory(), "%d_%d_%s" % (seed, num_run, budget) - ) - - def get_model_filename(self, seed: int, idx: int, budget: float) -> str: - return "%s.%s.%s.model" % (seed, idx, budget) - - -def compare_read_preds(read_preds1, read_preds2): - """ - compares read_preds attribute. 
An alternative to - assert Dict Equal as it contains np arrays, so we have - to use np testing utilities accordingly - """ - - # Both arrays should have the same splits - assert set(read_preds1.keys()) == set(read_preds2.keys()) - - for k, v in read_preds1.items(): - - # Each split should have the same elements - assert set(read_preds1[k].keys()) == set(read_preds2[k].keys()) - - # This level contains the scores/ensmebles/etc - for actual_k, actual_v in read_preds1[k].items(): - - # If it is a numpy array, make sure it is the same - if type(actual_v) is np.ndarray: - np.testing.assert_array_equal(actual_v, read_preds2[k][actual_k]) - else: - assert actual_v == read_preds2[k][actual_k] - - -class EnsembleBuilderMemMock(EnsembleBuilder): - def fit_ensemble(self, selected_keys): - return True - - def predict( - self, - set_: str, - ensemble: AbstractEnsemble, - selected_keys: list, - n_preds: int, - index_run: int, - ): - np.ones([10000000, 1000000]) diff --git a/test/test_ensemble_builder/test_ensemble.py b/test/test_ensemble_builder/test_ensemble.py index 469f617fb0..a41d90f560 100644 --- a/test/test_ensemble_builder/test_ensemble.py +++ b/test/test_ensemble_builder/test_ensemble.py @@ -1,7 +1,7 @@ +from __future__ import annotations + import os import pickle -import shutil -import sys import time import dask.distributed @@ -9,6 +9,10 @@ import pandas as pd from smac.runhistory.runhistory import RunHistory, RunKey, RunValue +from autosklearn.automl_common.common.ensemble_building.abstract_ensemble import ( + AbstractEnsemble, +) +from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION, MULTILABEL_CLASSIFICATION from autosklearn.ensemble_builder import ( Y_ENSEMBLE, @@ -18,105 +22,28 @@ EnsembleBuilderManager, ) from autosklearn.ensembles.singlebest_ensemble import SingleBest -from autosklearn.metrics import accuracy, log_loss, roc_auc - -import pytest -import unittest.mock - -this_directory = os.path.dirname(__file__) -sys.path.append(this_directory) -from ensemble_utils import ( # noqa (E402: module level import not at top of file) - BackendMock, - EnsembleBuilderMemMock, - MockMetric, - compare_read_preds, -) - - -@pytest.fixture(scope="function") -def ensemble_backend(request): - test_id = "%s_%s" % (request.module.__name__, request.node.name) - test_dir = os.path.join(this_directory, test_id) - - try: - shutil.rmtree(test_dir) - except: # noqa E722 - pass - - # Make sure the folders we wanna create do not already exist. 
- backend = BackendMock(test_dir) +from autosklearn.metrics import Scorer, accuracy, log_loss, make_scorer, roc_auc - def get_finalizer(ensemble_backend): - def session_run_at_end(): - try: - shutil.rmtree(test_dir) - except: # noqa E722 - pass +from pytest_cases import fixture, parametrize, parametrize_with_cases +from unittest.mock import Mock, patch - return session_run_at_end +import test.test_ensemble_builder.cases as cases +from test.conftest import DEFAULT_SEED - request.addfinalizer(get_finalizer(backend)) - - return backend - - -@pytest.fixture(scope="function") -def ensemble_run_history(request): - - run_history = RunHistory() - run_history._add( - RunKey( - config_id=3, instance_id='{"task_id": "breast_cancer"}', seed=1, budget=3.0 - ), - RunValue( - cost=0.11347517730496459, - time=0.21858787536621094, - status=None, - starttime=time.time(), - endtime=time.time(), - additional_info={ - "duration": 0.20323538780212402, - "num_run": 3, - "configuration_origin": "Random Search", - }, - ), - status=None, - origin=None, - ) - run_history._add( - RunKey( - config_id=6, instance_id='{"task_id": "breast_cancer"}', seed=1, budget=6.0 - ), - RunValue( - cost=2 * 0.11347517730496459, - time=2 * 0.21858787536621094, - status=None, - starttime=time.time(), - endtime=time.time(), - additional_info={ - "duration": 0.20323538780212402, - "num_run": 6, - "configuration_origin": "Random Search", - }, - ), - status=None, - origin=None, - ) - return run_history - - -def testRead(ensemble_backend): +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_read(ensemble_backend: Backend) -> None: ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ) success = ensbuilder.compute_loss_per_model() - assert success, str(ensbuilder.read_preds) + assert success, f"read_preds = {str(ensbuilder.read_preds)}" + assert len(ensbuilder.read_preds) == 3, ensbuilder.read_preds.keys() assert len(ensbuilder.read_losses) == 3, ensbuilder.read_losses.keys() @@ -133,8 +60,8 @@ def testRead(ensemble_backend): assert ensbuilder.read_losses[filename]["ens_loss"] == 0.0 -@pytest.mark.parametrize( - "ensemble_nbest,max_models_on_disc,exp", +@parametrize( + "ensemble_nbest, max_models_on_disc, expected", ( (1, None, 1), (1.0, None, 2), @@ -144,13 +71,41 @@ def testRead(ensemble_backend): (2, 1, 1), ), ) -def testNBest(ensemble_backend, ensemble_nbest, max_models_on_disc, exp): +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_nbest( + ensemble_backend: Backend, + ensemble_nbest: int | float, + max_models_on_disc: int | None, + expected: int, +) -> None: + """ + Parameters + ---------- + ensemble_backend: Backend + The backend to use. In this case, we specifically rely on the `setup_3_models` + setup. + + ensemble_nbest: int | float + The parameter to use for consider the n best, int being absolute and float being + fraction. 
+ + max_models_on_disc: int | None + The maximum amount of models to keep on disk + + expected: int + The number of keys expected to be selected + + Expects + ------- + * get_n_best_preds should contain 2 keys + * The first key should be model 0_2_0_0 + """ ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=ensemble_nbest, max_models_on_disc=max_models_on_disc, ) @@ -158,7 +113,7 @@ def testNBest(ensemble_backend, ensemble_nbest, max_models_on_disc, exp): ensbuilder.compute_loss_per_model() sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) == exp + assert len(sel_keys) == expected fixture = os.path.join( ensemble_backend.temporary_directory, @@ -167,8 +122,8 @@ def testNBest(ensemble_backend, ensemble_nbest, max_models_on_disc, exp): assert sel_keys[0] == fixture -@pytest.mark.parametrize( - "test_case,exp", +@parametrize( + "max_models_on_disc, expected", [ # If None, no reduction (None, 2), @@ -185,26 +140,47 @@ def testNBest(ensemble_backend, ensemble_nbest, max_models_on_disc, exp): (9999.0, 2), ], ) -def testMaxModelsOnDisc(ensemble_backend, test_case, exp): - ensemble_nbest = 4 +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_max_models_on_disc( + ensemble_backend: Backend, + max_models_on_disc: int | float, + expected: int, +) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The backend to use, relies on setup_3_models + + max_models_on_disc : int | float + The max_models_on_disc param to use + + expected : int + The expected number of selected models + + Expects + ------- + * The number of selected models should be as expected + """ ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, seed=0, # important to find the test files - ensemble_nbest=ensemble_nbest, - max_models_on_disc=test_case, + ensemble_nbest=4, + max_models_on_disc=max_models_on_disc, ) - with unittest.mock.patch("os.path.getsize") as mock: + with patch("os.path.getsize") as mock: mock.return_value = 100 * 1024 * 1024 ensbuilder.compute_loss_per_model() sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) == exp, test_case + assert len(sel_keys) == expected -def testMaxModelsOnDisc2(ensemble_backend): +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_max_models_on_disc_2(ensemble_backend: Backend) -> None: # Test for Extreme scenarios # Make sure that the best predictions are kept ensbuilder = EnsembleBuilder( @@ -212,40 +188,103 @@ def testMaxModelsOnDisc2(ensemble_backend): dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=50, max_models_on_disc=10000.0, ) ensbuilder.read_preds = {} - for i in range(50): - ensbuilder.read_losses["pred" + str(i)] = { - "ens_loss": -i * 10, - "num_run": i, + + for n in range(50): + loss = 10 * -n + ensbuilder.read_losses["pred" + str(n)] = { + "ens_loss": loss, + "num_run": n, "loaded": 1, - "seed": 1, - "disc_space_cost_mb": 50 * i, + "seed": 0, + "disc_space_cost_mb": 50 * n, } - ensbuilder.read_preds["pred" + str(i)] = {Y_ENSEMBLE: True} + ensbuilder.read_preds["pred" + str(n)] = {Y_ENSEMBLE: True} + sel_keys = ensbuilder.get_n_best_preds() assert 
["pred49", "pred48", "pred47"] == sel_keys - # Make sure at least one model is kept alive - ensbuilder.max_models_on_disc = 0.0 + +@parametrize("n_models", [50, 10, 2, 1]) +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_max_models_on_disc_preserves_always_preserves_at_least_one_model( + n_models: int, + ensemble_backend: Backend, +) -> None: + """ + Parameters + ---------- + n_models : int + + ensemble_backend : Backend + + """ + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=50, + max_models_on_disc=0.0, + ) + + read_losses = { + f"pred{n}": { + "ens_loss": 10 * -n, + "num_run": n + 1, + "loaded": 1, + "seed": 0, + "disc_space_cost_mb": 50 * n, + } + for n in range(n_models) + } + best_model = min(read_losses, key=lambda m: read_losses[m]["ens_loss"]) + + ensbuilder.read_losses = read_losses + ensbuilder.read_preds = {f"pred{n}": {Y_ENSEMBLE: True} for n in range(n_models)} + sel_keys = ensbuilder.get_n_best_preds() - assert ["pred49"] == sel_keys + assert [best_model] == sel_keys -@pytest.mark.parametrize( - "performance_range_threshold,exp", - ((0.0, 4), (0.1, 4), (0.3, 3), (0.5, 2), (0.6, 2), (0.8, 1), (1.0, 1), (1, 1)), +@parametrize( + "performance_range_threshold, expected_selected", + ((0.0, 4), (0.1, 4), (0.3, 3), (0.5, 2), (0.6, 2), (0.8, 1), (1.0, 1)), ) -def testPerformanceRangeThreshold(ensemble_backend, performance_range_threshold, exp): +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_performance_range_threshold( + ensemble_backend: Backend, + performance_range_threshold: float, + expected_selected: int, +) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The backend to use + + performance_range_threshold : float + THe performance range threshold to use + + expected_selected : int + The number of selected models for there to be + + Expects + ------- + * Expects the given amount of models to be selected given a performance range + threshold. 
+ """ ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=100, performance_range_threshold=performance_range_threshold, ) @@ -257,16 +296,16 @@ def testPerformanceRangeThreshold(ensemble_backend, performance_range_threshold, "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": 1}, } ensbuilder.read_preds = { - key: {key_2: True for key_2 in (Y_ENSEMBLE, Y_VALID, Y_TEST)} - for key in ensbuilder.read_losses + name: {preds_key: True for preds_key in (Y_ENSEMBLE, Y_VALID, Y_TEST)} + for name in ensbuilder.read_losses } - sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) == exp + sel_keys = ensbuilder.get_n_best_preds() + assert len(sel_keys) == expected_selected -@pytest.mark.parametrize( - "performance_range_threshold,ensemble_nbest,exp", +@parametrize( + "performance_range_threshold, ensemble_nbest, expected_selected", ( (0.0, 1, 1), (0.0, 1.0, 4), @@ -278,15 +317,33 @@ def testPerformanceRangeThreshold(ensemble_backend, performance_range_threshold, (1, 1.0, 1), ), ) -def testPerformanceRangeThresholdMaxBest( - ensemble_backend, performance_range_threshold, ensemble_nbest, exp -): +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_performance_range_threshold_with_ensemble_nbest( + ensemble_backend: Backend, + performance_range_threshold: float, + ensemble_nbest: int | float, + expected_selected: int, +) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + performance_range_threshold : float + ensemble_nbest : int | float + expected_selected : int + The number of models expected to be selected + + Expects + ------- + * Given the setup of params for test_performance_range_threshold and ensemble_nbest, + the expected number of models should be selected. 
+ """ ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=ensemble_nbest, performance_range_threshold=performance_range_threshold, max_models_on_disc=None, @@ -299,22 +356,22 @@ def testPerformanceRangeThresholdMaxBest( "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": 1}, } ensbuilder.read_preds = { - key: {key_2: True for key_2 in (Y_ENSEMBLE, Y_VALID, Y_TEST)} - for key in ensbuilder.read_losses + name: {pred_name: True for pred_name in (Y_ENSEMBLE, Y_VALID, Y_TEST)} + for name in ensbuilder.read_losses } sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) == exp - + assert len(sel_keys) == expected_selected -def testFallBackNBest(ensemble_backend): +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_fall_back_nbest(ensemble_backend: Backend) -> None: ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=1, ) @@ -324,96 +381,99 @@ def testFallBackNBest(ensemble_backend): print(ensbuilder.read_losses.keys()) print(ensemble_backend.temporary_directory) - filename = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", - ) - ensbuilder.read_losses[filename]["ens_loss"] = -1 - - filename = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy", - ) - ensbuilder.read_losses[filename]["ens_loss"] = -1 - - filename = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy", - ) - ensbuilder.read_losses[filename]["ens_loss"] = -1 + for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"]: + filename = os.path.join( + ensemble_backend.temporary_directory, + f".auto-sklearn/runs/{model}/predictions_ensemble_{model}.npy", + ) + ensbuilder.read_losses[filename]["ens_loss"] = -1 sel_keys = ensbuilder.get_n_best_preds() + best_model = "0_1_0.0" fixture = os.path.join( ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy", + f".auto-sklearn/runs/{best_model}/predictions_ensemble_{best_model}.npy", ) + assert len(sel_keys) == 1 assert sel_keys[0] == fixture -def testGetValidTestPreds(ensemble_backend): - +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_get_valid_test_preds(ensemble_backend: Backend) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The ensemble backend to use with the setup_3_models setup + """ ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=1, ) - ensbuilder.compute_loss_per_model() + # There are 3 models in the setup + # * Run 1 is the dummy run + # * Run 2 and Run 3 share the same predictions + # -> Run 2 is selected with ensemble_nbest = 1 + paths = [ + os.path.join( + ensemble_backend.temporary_directory, + f".auto-sklearn/runs/{model}/predictions_ensemble_{model}.npy", + ) + for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"] + ] - # d1 is a dummt prediction. 
d2 and d3 have the same prediction with - # different name. num_run=2 is selected when doing sorted() - d1 = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy", - ) - d2 = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", - ) - d3 = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy", - ) + ensbuilder.compute_loss_per_model() sel_keys = ensbuilder.get_n_best_preds() assert len(sel_keys) == 1 + ensbuilder.get_valid_test_preds(selected_keys=sel_keys) - # Number of read files should be three and - # predictions_ensemble_0_4_0.0.npy must not be in there - assert len(ensbuilder.read_preds) == 3 - assert ( - os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_4_0.0/predictions_ensemble_0_4_0.0.npy", - ) - not in ensbuilder.read_preds - ) + # Number of read files should be three and contain those of the models in the setup + assert set(ensbuilder.read_preds.keys()) == set(paths) + + selected = sel_keys + non_selected = set(paths) - set(sel_keys) # not selected --> should still be None - assert ensbuilder.read_preds[d1][Y_VALID] is None - assert ensbuilder.read_preds[d1][Y_TEST] is None - assert ensbuilder.read_preds[d3][Y_VALID] is None - assert ensbuilder.read_preds[d3][Y_TEST] is None + for key in non_selected: + assert ensbuilder.read_preds[key][Y_VALID] is None + assert ensbuilder.read_preds[key][Y_TEST] is None # selected --> read valid and test predictions - assert ensbuilder.read_preds[d2][Y_VALID] is not None - assert ensbuilder.read_preds[d2][Y_TEST] is not None - + for key in selected: + assert ensbuilder.read_preds[key][Y_VALID] is not None + assert ensbuilder.read_preds[key][Y_TEST] is not None -def testEntireEnsembleBuilder(ensemble_backend): +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The ensemble backend to use with the setup_3_models setup + + Expects + ------- + * The validation and test sets should both have equal predictions for them? 
+ * Since model 0_2_0.0 has predictions exactly equal to the targets, it should + recieve full weight and that the predictions should be identical to that models + predictions + """ ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=2, ) ensbuilder.SAVE2DISC = False @@ -463,14 +523,14 @@ def testEntireEnsembleBuilder(ensemble_backend): np.testing.assert_array_almost_equal(y_valid, y_valid_d2) -def test_main(ensemble_backend): - +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_main(ensemble_backend: Backend) -> None: ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", - task_type=MULTILABEL_CLASSIFICATION, # Multilabel Classification + task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=2, max_models_on_disc=None, ) @@ -512,14 +572,15 @@ def test_main(ensemble_backend): ), os.listdir(ensemble_backend.internals_directory) -def test_run_end_at(ensemble_backend): - with unittest.mock.patch("pynisher.enforce_limits") as pynisher_mock: +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_run_end_at(ensemble_backend: Backend) -> None: + with patch("pynisher.enforce_limits") as pynisher_mock: ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", - task_type=MULTILABEL_CLASSIFICATION, # Multilabel Classification + task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=2, max_models_on_disc=None, ) @@ -528,24 +589,41 @@ def test_run_end_at(ensemble_backend): current_time = time.time() ensbuilder.run( - end_at=current_time + 10, iteration=1, pynisher_context="forkserver" + end_at=current_time + 10, + iteration=1, + pynisher_context="forkserver", ) # 4 seconds left because: 10 seconds - 5 seconds overhead - little overhead # but then rounded to an integer - assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"], 4 + assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"] == 4 + + +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_limit(ensemble_backend: Backend) -> None: + class EnsembleBuilderMemMock(EnsembleBuilder): + def fit_ensemble(self, selected_keys): + return True + def predict( + self, + set_: str, + ensemble: AbstractEnsemble, + selected_keys: list, + n_preds: int, + index_run: int, + ): + np.ones([10000000, 1000000]) -def testLimit(ensemble_backend): ensbuilder = EnsembleBuilderMemMock( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=10, - # small to trigger MemoryException - memory_limit=100, + memory_limit=10, # small to trigger MemoryException ) + ensbuilder.SAVE2DISC = False read_losses_file = os.path.join( @@ -555,28 +633,25 @@ def testLimit(ensemble_backend): ensemble_backend.internals_directory, "ensemble_read_preds.pkl" ) - def mtime_mock(filename): + def mtime_mock(filename: str) -> float: mtimes = { - "predictions_ensemble_0_1_0.0.npy": 0, + "predictions_ensemble_0_1_0.0.npy": 0.0, "predictions_valid_0_1_0.0.npy": 0.1, 
"predictions_test_0_1_0.0.npy": 0.2, - "predictions_ensemble_0_2_0.0.npy": 1, + "predictions_ensemble_0_2_0.0.npy": 1.0, "predictions_valid_0_2_0.0.npy": 1.1, "predictions_test_0_2_0.0.npy": 1.2, - "predictions_ensemble_0_3_100.0.npy": 2, + "predictions_ensemble_0_3_100.0.npy": 2.0, "predictions_valid_0_3_100.0.npy": 2.1, "predictions_test_0_3_100.0.npy": 2.2, } return mtimes[os.path.split(filename)[1]] - with unittest.mock.patch( - "logging.getLogger" - ) as get_logger_mock, unittest.mock.patch( - "logging.config.dictConfig" - ) as _, unittest.mock.patch( + with patch("logging.getLogger") as get_logger_mock, patch( "os.path.getmtime" - ) as mtime: - logger_mock = unittest.mock.Mock() + ) as mtime, patch("logging.config.dictConfig"): + + logger_mock = Mock() logger_mock.handlers = [] get_logger_mock.return_value = logger_mock mtime.side_effect = mtime_mock @@ -584,11 +659,14 @@ def mtime_mock(filename): ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") assert os.path.exists(read_losses_file) assert not os.path.exists(read_preds_file) + print(logger_mock.warning.call_args_list) assert logger_mock.warning.call_count == 1 + ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") assert os.path.exists(read_losses_file) assert not os.path.exists(read_preds_file) assert logger_mock.warning.call_count == 2 + ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") assert os.path.exists(read_losses_file) assert not os.path.exists(read_preds_file) @@ -623,7 +701,8 @@ def mtime_mock(filename): ) -def test_read_pickle_read_preds(ensemble_backend): +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_read_pickle_read_preds(ensemble_backend: Backend) -> None: """ This procedure test that we save the read predictions before destroying the ensemble builder and that we are able to read @@ -632,9 +711,9 @@ def test_read_pickle_read_preds(ensemble_backend): ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", - task_type=MULTILABEL_CLASSIFICATION, # Multilabel Classification + task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=0, # important to find the test files + seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=2, max_models_on_disc=None, ) @@ -652,7 +731,24 @@ def test_read_pickle_read_preds(ensemble_backend): with (open(ensemble_memory_file, "rb")) as memory: read_preds, last_hash = pickle.load(memory) - compare_read_preds(read_preds, ensbuilder.read_preds) + def assert_equal_read_preds(a: dict, b: dict) -> None: + """ + * Keys are check to be the same at each depth + * Any ndarray as check for equality with numpy + * Everything else is checked with regular equality + """ + # Both arrays should have the same splits + assert set(a.keys()) == set(b.keys()) + + for k in a.keys(): + if isinstance(a[k], dict): + assert_equal_read_preds(a[k], b[k]) + elif isinstance(a[k], np.ndarray): + np.testing.assert_array_equal(a[k], b[k]) + else: + assert a[k] == b[k], f"Key: {k}" + + assert_equal_read_preds(read_preds, ensbuilder.read_preds) assert last_hash == ensbuilder.last_hash ensemble_memory_file = os.path.join( @@ -664,7 +760,7 @@ def test_read_pickle_read_preds(ensemble_backend): with (open(ensemble_memory_file, "rb")) as memory: read_losses = pickle.load(memory) - compare_read_preds(read_losses, ensbuilder.read_losses) + assert_equal_read_preds(read_losses, ensbuilder.read_losses) # Then create a new instance, which should automatically read this file ensbuilder2 = 
EnsembleBuilder( @@ -676,16 +772,59 @@ def test_read_pickle_read_preds(ensemble_backend): ensemble_nbest=2, max_models_on_disc=None, ) - compare_read_preds(ensbuilder2.read_preds, ensbuilder.read_preds) - compare_read_preds(ensbuilder2.read_losses, ensbuilder.read_losses) + assert_equal_read_preds(ensbuilder2.read_preds, ensbuilder.read_preds) + assert_equal_read_preds(ensbuilder2.read_losses, ensbuilder.read_losses) assert ensbuilder2.last_hash == ensbuilder.last_hash -@pytest.mark.parametrize("metric", [log_loss, accuracy]) -@unittest.mock.patch("os.path.exists") +@patch("os.path.exists") +@parametrize("metric", [log_loss, accuracy]) +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) def test_get_identifiers_from_run_history( - exists, metric, ensemble_run_history, ensemble_backend -): + exists: Mock, + metric: Scorer, + ensemble_backend: Backend, +) -> None: + run_history = RunHistory() + run_history._add( + RunKey( + config_id=3, instance_id='{"task_id": "breast_cancer"}', seed=1, budget=3.0 + ), + RunValue( + cost=0.11347517730496459, + time=0.21858787536621094, + status=None, + starttime=time.time(), + endtime=time.time(), + additional_info={ + "duration": 0.20323538780212402, + "num_run": 3, + "configuration_origin": "Random Search", + }, + ), + status=None, + origin=None, + ) + run_history._add( + RunKey( + config_id=6, instance_id='{"task_id": "breast_cancer"}', seed=1, budget=6.0 + ), + RunValue( + cost=2 * 0.11347517730496459, + time=2 * 0.21858787536621094, + status=None, + starttime=time.time(), + endtime=time.time(), + additional_info={ + "duration": 0.20323538780212402, + "num_run": 6, + "configuration_origin": "Random Search", + }, + ), + status=None, + origin=None, + ) + return run_history exists.return_value = True ensemble = SingleBest( metric=log_loss, @@ -704,14 +843,20 @@ def test_get_identifiers_from_run_history( assert budget == 3.0 -def test_ensemble_builder_process_realrun(dask_client_single_worker, ensemble_backend): +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_ensemble_builder_process_realrun( + dask_client_single_worker: dask.distributed.Client, + ensemble_backend: Backend, +) -> None: + mock_metric = make_scorer("mock", lambda x, y: 0.9) + manager = EnsembleBuilderManager( start_time=time.time(), time_left_for_ensembles=1000, backend=ensemble_backend, dataset_name="Test", task=BINARY_CLASSIFICATION, - metric=MockMetric, + metric=mock_metric, ensemble_size=50, ensemble_nbest=10, max_models_on_disc=None, @@ -736,15 +881,15 @@ def test_ensemble_builder_process_realrun(dask_client_single_worker, ensemble_ba assert history[0]["ensemble_test_score"] == 0.9 +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) def test_ensemble_builder_nbest_remembered( - ensemble_backend, - dask_client_single_worker, -): + ensemble_backend: Backend, + dask_client_single_worker: dask.distributed.Client, +) -> None: """ Makes sure ensemble builder returns the size of the ensemble that pynisher allowed This way, we can remember it and not waste more time trying big ensemble sizes """ - manager = EnsembleBuilderManager( start_time=time.time(), time_left_for_ensembles=1000, From f28c3e4ecfc7df45dca2e96da9d0f5dd5928c201 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 15:55:57 +0200 Subject: [PATCH 004/117] Update tests to be documented and cleaned up --- autosklearn/ensemble_builder.py | 16 +- test/test_ensemble_builder/test_ensemble.py | 427 
++++++++++---------- 2 files changed, 234 insertions(+), 209 deletions(-) diff --git a/autosklearn/ensemble_builder.py b/autosklearn/ensemble_builder.py index c17c23a4e2..e033a9b12b 100644 --- a/autosklearn/ensemble_builder.py +++ b/autosklearn/ensemble_builder.py @@ -1,4 +1,5 @@ from __future__ import annotations + from typing import List, Optional, Tuple, Union import glob @@ -120,7 +121,7 @@ def __init__( precision: [16,32,64,128] precision of floats to read the predictions - memory_limit: Optional[int] + ensemble_memory_limit: Optional[int] memory limit in mb. If ``None``, no memory limit is enforced. read_at_most: int @@ -182,7 +183,9 @@ def __call__( self.build_ensemble(smbo.tae_runner.client) def build_ensemble( - self, dask_client: dask.distributed.Client, unit_test: bool = False + self, + dask_client: dask.distributed.Client, + unit_test: bool = False, ) -> None: # The second criteria is elapsed time @@ -629,10 +632,11 @@ def run( elif time_left is not None and end_at is not None: raise ValueError("Cannot provide both time_left and end_at.") - self.logger = get_named_client_logger( - name="EnsembleBuilder", - port=self.logger_port, - ) + if not self.logger: + self.logger = get_named_client_logger( + name="EnsembleBuilder", + port=self.logger_port, + ) process_start_time = time.time() while True: diff --git a/test/test_ensemble_builder/test_ensemble.py b/test/test_ensemble_builder/test_ensemble.py index a41d90f560..012e110ce6 100644 --- a/test/test_ensemble_builder/test_ensemble.py +++ b/test/test_ensemble_builder/test_ensemble.py @@ -1,8 +1,11 @@ from __future__ import annotations +from typing import Any, Callable, Tuple + import os import pickle import time +from pathlib import Path import dask.distributed import numpy as np @@ -13,7 +16,7 @@ AbstractEnsemble, ) from autosklearn.automl_common.common.utils.backend import Backend -from autosklearn.constants import BINARY_CLASSIFICATION, MULTILABEL_CLASSIFICATION +from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.ensemble_builder import ( Y_ENSEMBLE, Y_TEST, @@ -24,10 +27,11 @@ from autosklearn.ensembles.singlebest_ensemble import SingleBest from autosklearn.metrics import Scorer, accuracy, log_loss, make_scorer, roc_auc -from pytest_cases import fixture, parametrize, parametrize_with_cases +from pytest_cases import parametrize, parametrize_with_cases from unittest.mock import Mock, patch import test.test_ensemble_builder.cases as cases +from test.fixtures.logging import MockLogger from test.conftest import DEFAULT_SEED @@ -115,11 +119,11 @@ def test_nbest( assert len(sel_keys) == expected - fixture = os.path.join( + expected = os.path.join( ensemble_backend.temporary_directory, ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", ) - assert sel_keys[0] == fixture + assert sel_keys[0] == expected @parametrize( @@ -391,13 +395,13 @@ def test_fall_back_nbest(ensemble_backend: Backend) -> None: sel_keys = ensbuilder.get_n_best_preds() best_model = "0_1_0.0" - fixture = os.path.join( + expected = os.path.join( ensemble_backend.temporary_directory, f".auto-sklearn/runs/{best_model}/predictions_ensemble_{best_model}.npy", ) assert len(sel_keys) == 1 - assert sel_keys[0] == fixture + assert sel_keys[0] == expected @parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) @@ -525,31 +529,54 @@ def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: @parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) def test_main(ensemble_backend: 
Backend) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The ensemble_backend to use, this test relies on this specific case + + Expects + ------- + * There should be "read_preds" and "read_losses" saved to file + * There should be 3 model reads + * There should be a hash for the preds read in + * The true targets should have been read in + * The length of the history returned by run should be the same as the iterations + performed. + * The run history should contain "optimization", "val" and "test" scores, each being + the same at 1.0 due to the setup of "setup_3_models". + """ + iters = 1 + ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=2, - max_models_on_disc=None, ) - ensbuilder.SAVE2DISC = False run_history, ensemble_nbest, _, _, _ = ensbuilder.main( time_left=np.inf, - iteration=1, + iteration=iters, return_predictions=False, ) + internals_dir = Path(ensemble_backend.internals_directory) + read_preds_path = (internals_dir / "ensemble_read_preds.pkl") + read_losses_path = (internals_dir / "ensemble_read_losses.pkl") + + assert read_preds_path.exists(), list(internals_dir.iterdir()) + assert read_losses_path.exists(), list(internals_dir.iterdir()) + + # There should be three preds read assert len(ensbuilder.read_preds) == 3 assert ensbuilder.last_hash is not None assert ensbuilder.y_true_ensemble is not None - # Make sure the run history is ok - - # We expect at least 1 element to be in the ensemble - assert len(run_history) > 0 + # We expect as many iterations as the iters param + assert len(run_history) == iters + hist_item = run_history[0] # As the data loader loads the same val/train/test # we expect 1.0 as score and all keys available @@ -559,154 +586,167 @@ def test_main(ensemble_backend: Backend) -> None: "ensemble_optimization_score": 1.0, } - # Make sure that expected performance is a subset of the run history - assert all(item in run_history[0].items() for item in expected_performance.items()) - assert "Timestamp" in run_history[0] - assert isinstance(run_history[0]["Timestamp"], pd.Timestamp) - - assert os.path.exists( - os.path.join(ensemble_backend.internals_directory, "ensemble_read_preds.pkl") - ), os.listdir(ensemble_backend.internals_directory) - assert os.path.exists( - os.path.join(ensemble_backend.internals_directory, "ensemble_read_losses.pkl") - ), os.listdir(ensemble_backend.internals_directory) + assert all(key in hist_item for key in expected_performance) + assert all(hist_item[key] == score for key, score in expected_performance.items()) + assert "Timestamp" in hist_item +@parametrize("time_buffer", [1, 5]) +@parametrize("duration", [10, 20]) @parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_run_end_at(ensemble_backend: Backend) -> None: +def test_run_end_at(ensemble_backend: Backend, time_buffer: int, duration: int) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The backend to use + + time_buffer: int + How much time buffer to give to the ensemble builder + + duration: int + How long to run the ensemble builder for + + Expects + ------- + * The limits enforced by pynisher should account for the time_buffer and duration + to run for + a little bit of overhead that gets rounded to a second. 
+ """ with patch("pynisher.enforce_limits") as pynisher_mock: ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=2, - max_models_on_disc=None, ) - ensbuilder.SAVE2DISC = False - - current_time = time.time() ensbuilder.run( - end_at=current_time + 10, + end_at=time.time() + duration, iteration=1, + time_buffer=time_buffer, pynisher_context="forkserver", ) - # 4 seconds left because: 10 seconds - 5 seconds overhead - little overhead - # but then rounded to an integer - assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"] == 4 + + # The 1 comes from the small overhead in conjuction with rounding down + expected = duration - time_buffer - 1 + assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"] == expected @parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_limit(ensemble_backend: Backend) -> None: - class EnsembleBuilderMemMock(EnsembleBuilder): - def fit_ensemble(self, selected_keys): - return True - - def predict( - self, - set_: str, - ensemble: AbstractEnsemble, - selected_keys: list, - n_preds: int, - index_run: int, - ): - np.ones([10000000, 1000000]) - - ensbuilder = EnsembleBuilderMemMock( +def test_limit( + ensemble_backend: Backend, + mock_logger: MockLogger, +) -> None: + """ + + Parameters + ---------- + ensemble_backend : Backend + The backend setup to use + + Fixtures + -------- + mock_logger: MockLogger + A logger to inject into the EnsembleBuilder for tracking calls + + Expects + ------- + * Running from (ensemble_nbest, read_at_most) = (10, 5) where a memory exception + occurs in each run, we expect ensemble_nbest to be halved continuously until + it reaches 0, at which point read_at_most is reduced directly to 1. 
+ """ + expected_states = [(10, 5), (5, 5), (2, 5), (1, 5), (0, 1)] + + starting_state = expected_states[0] + intermediate_states = expected_states[1:-1] + final_state = expected_states[-1] + + starting_nbest, starting_read_at_most = starting_state + + ensbuilder = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=10, - memory_limit=10, # small to trigger MemoryException + ensemble_nbest=starting_nbest, + read_at_most=starting_read_at_most, + memory_limit=1, ) + ensbuilder.predict = Mock(side_effect=MemoryError) # Force a memory error + ensbuilder.logger = mock_logger # Mock its logger ensbuilder.SAVE2DISC = False - read_losses_file = os.path.join( - ensemble_backend.internals_directory, "ensemble_read_losses.pkl" - ) - read_preds_file = os.path.join( - ensemble_backend.internals_directory, "ensemble_read_preds.pkl" - ) + internal_dir = Path(ensemble_backend.internals_directory) + read_losses_file = internal_dir / "ensemble_read_losses.pkl" + read_preds_file = internal_dir / "ensemble_read_preds.pkl" def mtime_mock(filename: str) -> float: + """TODO, not really sure why we have to force these""" + path = Path(filename) mtimes = { + # At second 0 "predictions_ensemble_0_1_0.0.npy": 0.0, "predictions_valid_0_1_0.0.npy": 0.1, "predictions_test_0_1_0.0.npy": 0.2, + # At second 1 "predictions_ensemble_0_2_0.0.npy": 1.0, "predictions_valid_0_2_0.0.npy": 1.1, "predictions_test_0_2_0.0.npy": 1.2, + # At second 2 "predictions_ensemble_0_3_100.0.npy": 2.0, "predictions_valid_0_3_100.0.npy": 2.1, "predictions_test_0_3_100.0.npy": 2.2, } - return mtimes[os.path.split(filename)[1]] + return mtimes[path.name] - with patch("logging.getLogger") as get_logger_mock, patch( - "os.path.getmtime" - ) as mtime, patch("logging.config.dictConfig"): - - logger_mock = Mock() - logger_mock.handlers = [] - get_logger_mock.return_value = logger_mock + with patch("os.path.getmtime") as mtime: mtime.side_effect = mtime_mock - ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - assert os.path.exists(read_losses_file) - assert not os.path.exists(read_preds_file) - print(logger_mock.warning.call_args_list) - assert logger_mock.warning.call_count == 1 + starting_state = (starting_nbest, starting_read_at_most) + assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == starting_state - ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - assert os.path.exists(read_losses_file) - assert not os.path.exists(read_preds_file) - assert logger_mock.warning.call_count == 2 + intermediate_states = [(5, 5), (2, 5), (1, 5), (0, 1)] + for i, exp_state in enumerate(intermediate_states, start=1): + ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - assert os.path.exists(read_losses_file) - assert not os.path.exists(read_preds_file) - assert logger_mock.warning.call_count == 3 + assert read_losses_file.exists() + assert not read_preds_file.exists() + + assert mock_logger.warning.call_count == i - # it should try to reduce ensemble_nbest until it also failed at 2 - assert ensbuilder.ensemble_nbest == 1 + assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == exp_state + # At this point, when we've reached (ensemble_nbest, read_at_most) = (0, 1), + # we can still run the ensbulder but it should just raise an error and not + # change it's internal state 
ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - assert os.path.exists(read_losses_file) - assert not os.path.exists(read_preds_file) - assert logger_mock.warning.call_count == 4 - # it should next reduce the number of models to read at most - assert ensbuilder.read_at_most == 1 + assert read_losses_file.exists() + assert not read_preds_file.exists() - # And then it still runs, but basically won't do anything any more except for - # raising error messages via the logger - ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - assert os.path.exists(read_losses_file) - assert not os.path.exists(read_preds_file) - assert logger_mock.warning.call_count == 4 - - # In the previous assert, reduction is tried until failure - # So that means we should have more than 1 memoryerror message - assert logger_mock.error.call_count >= 1, "{}".format( - logger_mock.error.call_args_list - ) - for i in range(len(logger_mock.error.call_args_list)): - assert "Memory Exception -- Unable to further reduce" in str( - logger_mock.error.call_args_list[i] - ) + assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == final_state + + assert mock_logger.warning.call_count == len(intermediate_states) + assert mock_logger.error.call_count == 1, mock_logger.error.call_args_list + + for call_arg in mock_logger.error.call_args_list: + assert "Memory Exception -- Unable to further reduce" in str(call_arg) @parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) def test_read_pickle_read_preds(ensemble_backend: Backend) -> None: """ - This procedure test that we save the read predictions before - destroying the ensemble builder and that we are able to read - them safely after + Parameters + ---------- + ensemble_backend : Backend + The ensemble backend to use + + Expects + ------- + * The read_losses and read_preds should be cached between creation of + the EnsembleBuilder.
""" ensbuilder = EnsembleBuilder( backend=ensemble_backend, @@ -722,13 +762,14 @@ def test_read_pickle_read_preds(ensemble_backend: Backend) -> None: ensbuilder.main(time_left=np.inf, iteration=1, return_predictions=False) # Check that the memory was created - ensemble_memory_file = os.path.join( - ensemble_backend.internals_directory, "ensemble_read_preds.pkl" - ) - assert os.path.exists(ensemble_memory_file) + internal_dir = Path(ensemble_backend.internals_directory) + losses_file = internal_dir / "ensemble_read_losses.pkl" + memory_file = internal_dir / "ensemble_read_preds.pkl" + + assert memory_file.exists() # Make sure we pickle the correct read preads and hash - with (open(ensemble_memory_file, "rb")) as memory: + with memory_file.open("rb") as memory: read_preds, last_hash = pickle.load(memory) def assert_equal_read_preds(a: dict, b: dict) -> None: @@ -751,13 +792,10 @@ def assert_equal_read_preds(a: dict, b: dict) -> None: assert_equal_read_preds(read_preds, ensbuilder.read_preds) assert last_hash == ensbuilder.last_hash - ensemble_memory_file = os.path.join( - ensemble_backend.internals_directory, "ensemble_read_losses.pkl" - ) - assert os.path.exists(ensemble_memory_file) + assert losses_file.exists() # Make sure we pickle the correct read scores - with (open(ensemble_memory_file, "rb")) as memory: + with losses_file.open("rb") as memory: read_losses = pickle.load(memory) assert_equal_read_preds(read_losses, ensbuilder.read_losses) @@ -766,7 +804,7 @@ def assert_equal_read_preds(a: dict, b: dict) -> None: ensbuilder2 = EnsembleBuilder( backend=ensemble_backend, dataset_name="TEST", - task_type=MULTILABEL_CLASSIFICATION, # Multilabel Classification + task_type=BINARY_CLASSIFICATION, metric=roc_auc, seed=0, # important to find the test files ensemble_nbest=2, @@ -777,78 +815,34 @@ def assert_equal_read_preds(a: dict, b: dict) -> None: assert ensbuilder2.last_hash == ensbuilder.last_hash -@patch("os.path.exists") -@parametrize("metric", [log_loss, accuracy]) @parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_get_identifiers_from_run_history( - exists: Mock, - metric: Scorer, +def test_ensemble_builder_process_realrun( ensemble_backend: Backend, + make_dask_client: Callable[..., [dask.distributed.Client]], ) -> None: - run_history = RunHistory() - run_history._add( - RunKey( - config_id=3, instance_id='{"task_id": "breast_cancer"}', seed=1, budget=3.0 - ), - RunValue( - cost=0.11347517730496459, - time=0.21858787536621094, - status=None, - starttime=time.time(), - endtime=time.time(), - additional_info={ - "duration": 0.20323538780212402, - "num_run": 3, - "configuration_origin": "Random Search", - }, - ), - status=None, - origin=None, - ) - run_history._add( - RunKey( - config_id=6, instance_id='{"task_id": "breast_cancer"}', seed=1, budget=6.0 - ), - RunValue( - cost=2 * 0.11347517730496459, - time=2 * 0.21858787536621094, - status=None, - starttime=time.time(), - endtime=time.time(), - additional_info={ - "duration": 0.20323538780212402, - "num_run": 6, - "configuration_origin": "Random Search", - }, - ), - status=None, - origin=None, - ) - return run_history - exists.return_value = True - ensemble = SingleBest( - metric=log_loss, - seed=1, - run_history=ensemble_run_history, - backend=ensemble_backend, - ) - - # Just one model - assert len(ensemble.identifiers_) == 1 + """ - # That model must be the best - seed, num_run, budget = ensemble.identifiers_[0] - assert num_run == 3 - assert seed == 1 - assert budget == 3.0 + Parameters + 
---------- + ensemble_backend : Backend + The backend to use, doesn't really matter which kind + Fixtures + -------- + make_dask_client : Callable[..., [dask.distributed.Client]] -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_ensemble_builder_process_realrun( - dask_client_single_worker: dask.distributed.Client, - ensemble_backend: Backend, -) -> None: + Expects + ------- + * With 1 iteration, the history should only be of length one + * The expected ensmble score keys for "optimization", "valid" and "test" should + be in the one history item. + * The "Timestamp" key should be in the history item + * With a metric that always returns 0.9, each ensemble score should be 0.9 in the + history item + """ + dask_client = make_dask_client(n_workers=1) mock_metric = make_scorer("mock", lambda x, y: 0.9) + iterations = 1 manager = EnsembleBuilderManager( start_time=time.time(), @@ -860,42 +854,68 @@ def test_ensemble_builder_process_realrun( ensemble_size=50, ensemble_nbest=10, max_models_on_disc=None, - seed=0, + seed=DEFAULT_SEED, precision=32, - max_iterations=1, + max_iterations=iterations, read_at_most=np.inf, ensemble_memory_limit=None, random_state=0, ) - manager.build_ensemble(dask_client_single_worker) + manager.build_ensemble(dask_client) future = manager.futures.pop() dask.distributed.wait([future]) # wait for the ensemble process to finish + result = future.result() history, _, _, _, _ = result - assert "ensemble_optimization_score" in history[0] - assert history[0]["ensemble_optimization_score"] == 0.9 - assert "ensemble_val_score" in history[0] - assert history[0]["ensemble_val_score"] == 0.9 - assert "ensemble_test_score" in history[0] - assert history[0]["ensemble_test_score"] == 0.9 + assert len(history) == iterations + + hist_item = history[0] + + expected_scores = { + f"ensemble_{key}_score": 0.9 for key in ["optimization", "val", "test"] + } + + assert "Timestamp" in hist_item + assert all(key in hist_item for key in expected_scores) + assert all(hist_item[key] == expected_scores[key] for key in expected_scores) @parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) def test_ensemble_builder_nbest_remembered( ensemble_backend: Backend, - dask_client_single_worker: dask.distributed.Client, + make_dask_client: Callable[..., [dask.distributed.Client]], ) -> None: """ - Makes sure ensemble builder returns the size of the ensemble that pynisher allowed - This way, we can remember it and not waste more time trying big ensemble sizes + Parameters + ---------- + ensemble_backend: Backend + The backend to use, relies on the 3 setup models + + Fixtures + -------- + make_dask_client: (...) 
-> Client + Make a dask client + + Expects + ------- + * The read_preds file should not be created + * The ensemble_nbest should be remembered and reduced between runs + TODO Note sure why there would be a reduction and how these numbers were made + + Last Note + --------- + "Makes sure ensemble builder returns the size of the ensemble that pynisher allowed + This way, we can remember it and not waste more time trying big ensemble sizes" """ + dask_client = make_dask_client(n_workers=1) + manager = EnsembleBuilderManager( start_time=time.time(), time_left_for_ensembles=1000, backend=ensemble_backend, dataset_name="Test", - task=MULTILABEL_CLASSIFICATION, + task=BINARY_CLASSIFICATION, metric=roc_auc, ensemble_size=50, ensemble_nbest=10, @@ -908,18 +928,19 @@ def test_ensemble_builder_nbest_remembered( max_iterations=None, ) - manager.build_ensemble(dask_client_single_worker, unit_test=True) + filepath = Path(ensemble_backend.internals_directory) / "ensemble_read_preds.pkl" + + manager.build_ensemble(dask_client, unit_test=True) future = manager.futures[0] dask.distributed.wait([future]) # wait for the ensemble process to finish + assert future.result() == ([], 5, None, None, None) - file_path = os.path.join( - ensemble_backend.internals_directory, "ensemble_read_preds.pkl" - ) - assert not os.path.exists(file_path) - manager.build_ensemble(dask_client_single_worker, unit_test=True) + assert not filepath.exists() + manager.build_ensemble(dask_client, unit_test=True) future = manager.futures[0] dask.distributed.wait([future]) # wait for the ensemble process to finish - assert not os.path.exists(file_path) + + assert not filepath.exists() assert future.result() == ([], 2, None, None, None) From 96133125351515aaa101f3c5cbbdf4f9f7ad489d Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 17:57:28 +0200 Subject: [PATCH 005/117] Switch to using cached automl backends --- autosklearn/automl.py | 4 +- test/fixtures/automl.py | 9 + test/fixtures/backend.py | 20 +- .../ensemble_builder.py} | 0 test/mocks/logging.py | 14 +- test/test_ensemble_builder/cases.py | 77 +- .../__init__.py} | 0 .../test_3_models/cases.py | 100 ++ .../.auto-sklearn/runs/0_1_0.0/0.1.0.0.model} | 0 .../.auto-sklearn/runs/0_2_0.0/0.2.0.0.model} | 0 .../runs/0_3_100.0/0.3.0.0.model | 0 .../runs/0_3_100.0/0.3.100.0.model | 0 .../test_3_models/test_3_models.py | 874 ++++++++++++++++++ test/test_ensemble_builder/test_ensemble.py | 130 +-- .../test_ensemble_manager.py | 81 ++ .../runs/0_2_0.0/predictions_test_0_2_0.0.np | Bin 160 -> 0 bytes 16 files changed, 1132 insertions(+), 177 deletions(-) rename test/{test_ensemble_builder/toy_data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model => fixtures/ensemble_builder.py} (100%) rename test/test_ensemble_builder/{toy_data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model => test_3_models/__init__.py} (100%) create mode 100644 test/test_ensemble_builder/test_3_models/cases.py rename test/test_ensemble_builder/{toy_data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model => test_3_models/data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model} (100%) rename test/test_ensemble_builder/{toy_data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model => test_3_models/data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model} (100%) create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model create mode 100644 test/test_ensemble_builder/test_3_models/test_3_models.py create mode 
100644 test/test_ensemble_builder/test_ensemble_manager.py delete mode 100644 test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np diff --git a/autosklearn/automl.py b/autosklearn/automl.py index 1496aa2224..3c6caecc88 100644 --- a/autosklearn/automl.py +++ b/autosklearn/automl.py @@ -2130,7 +2130,7 @@ def fit( y: SUPPORTED_TARGET_TYPES | spmatrix, X_test: Optional[SUPPORTED_FEAT_TYPES] = None, y_test: Optional[SUPPORTED_TARGET_TYPES | spmatrix] = None, - feat_type: Optional[list[bool]] = None, + feat_type: Optional[list[str]] = None, dataset_name: Optional[str] = None, only_return_configuration_space: bool = False, load_models: bool = True, @@ -2220,7 +2220,7 @@ def fit( y: SUPPORTED_TARGET_TYPES | spmatrix, X_test: Optional[SUPPORTED_FEAT_TYPES] = None, y_test: Optional[SUPPORTED_TARGET_TYPES | spmatrix] = None, - feat_type: Optional[list[bool]] = None, + feat_type: Optional[list[str]] = None, dataset_name: Optional[str] = None, only_return_configuration_space: bool = False, load_models: bool = True, diff --git a/test/fixtures/automl.py b/test/fixtures/automl.py index abf31d304d..ced4297050 100644 --- a/test/fixtures/automl.py +++ b/test/fixtures/automl.py @@ -39,8 +39,17 @@ def _create_automl( "per_run_time_limit": 5, "seed": DEFAULT_SEED, "n_jobs": 2, + "ensemble_size": 10, + "ensemble_nbest": 10, + "max_models_on_disc": 10, + "initial_configurations_via_metalearning": 5, } + # If a temp directory was explicitly passed, don't delete it automatically + # Normally the `tmp_path` fixutre will delete it anyways + if "temporary_directory" in kwargs: + test_defaults["delete_tmp_folder_after_terminate"] = False + opts: Dict[str, Any] = {**test_defaults, **kwargs} if "dask_client" not in opts: diff --git a/test/fixtures/backend.py b/test/fixtures/backend.py index 5557b34a96..393bacde28 100644 --- a/test/fixtures/backend.py +++ b/test/fixtures/backend.py @@ -1,7 +1,9 @@ -from typing import Callable, Union, Optional -from distutils.dir_util import copy_tree +from __future__ import annotations + +from typing import Callable, Optional, Union import os +from distutils.dir_util import copy_tree from pathlib import Path from autosklearn.automl_common.common.utils.backend import Backend, create @@ -49,7 +51,7 @@ def make_backend() -> Callable[..., Backend]: # TODO redo once things use paths def _make( path: Union[str, Path], - template: Optional[Path] = None, + template: Optional[Path | Backend] = None, ) -> Backend: _path = Path(path) if not isinstance(path, Path) else path assert not _path.exists(), "Try passing path / 'backend'" @@ -61,9 +63,15 @@ def _make( ) if template is not None: - assert template.exists() - dest = Path(backend.temporary_directory) - copy_tree(str(template), str(dest)) + if isinstance(template, Backend): + template = Path(template.temporary_directory) + + if isinstance(template, Path): + assert template.exists() + dest = Path(backend.temporary_directory) + copy_tree(str(template), str(dest)) + else: + raise NotImplementedError(template) return backend diff --git a/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model b/test/fixtures/ensemble_builder.py similarity index 100% rename from test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model rename to test/fixtures/ensemble_builder.py diff --git a/test/mocks/logging.py b/test/mocks/logging.py index e61ca2c870..50e6578489 100644 --- a/test/mocks/logging.py +++ b/test/mocks/logging.py @@ -30,9 +30,11 @@ def __init__( self.port = port or MOCKPORT # 
Overwrite the logging implementations with mocks - attrs = ["debug", "info", "warning", "error", "exception", "critical", "log"] - for attr in attrs: - setattr(self, attr, Mock(return_value=None)) - - # This mock logger is enabled for all levels - setattr(self, "isEnabledFor", Mock(return_value=True)) + self.debug = Mock(return_value=None) # type: ignore + self.info = Mock(return_value=None) # type: ignore + self.warning = Mock(return_value=None) # type: ignore + self.error = Mock(return_value=None) # type: ignore + self.exception = Mock(return_value=None) # type: ignore + self.critical = Mock(return_value=None) # type: ignore + self.log = Mock(return_value=None) # type: ignore + self.isEnabledFor = Mock(return_value=True) # type: ignore diff --git a/test/test_ensemble_builder/cases.py b/test/test_ensemble_builder/cases.py index 9761eed1f6..9b69510d92 100644 --- a/test/test_ensemble_builder/cases.py +++ b/test/test_ensemble_builder/cases.py @@ -1,63 +1,40 @@ from typing import Callable -import pickle from pathlib import Path +from shutil import rmtree -import numpy as np - +from autosklearn.automl import AutoML from autosklearn.automl_common.common.utils.backend import Backend -from autosklearn.constants import BINARY_CLASSIFICATION -from autosklearn.data.xy_data_manager import XYDataManager -from pytest_cases import case +from pytest_cases import case, parametrize_with_cases -HERE = Path(__file__).parent.resolve() +import test.test_automl.cases as cases -@case(tags=["backend", "setup_3_models"]) -def case_backend_setup_3_models( +@case +@parametrize_with_cases("automl", cases=cases, has_tag="fitted") +def case_fitted_automl( tmp_path: Path, + automl: AutoML, make_backend: Callable[..., Backend], - make_sklearn_dataset: Callable[..., XYDataManager], ) -> Backend: - """See the contents of TOY_DATA for full details - - /toy_data - /.auto-sklearn - /runs - /0_1_0.0 - /0_2_0.0 - /0_3_100.0 - /datamanger.pkl - /predictions_ensemble_targets.npy - /true_targets_ensemble.npy # Same file as predictions_ensemble_targets + """Gives the backend for from the cached automl instance + + We do this by copying the backend produced from these cached automl runs to a new + tmp directory for the ensemble builder tests to run from + + We also delete ensemble building specific things so that ensemble sees them as + just a collection of runs and no previous ensemble building has been done. 
""" - path = tmp_path / "backend" - TOY_DATA = HERE / "toy_data" - - # Create the datamanager that was used if needed - dm_path = TOY_DATA / ".auto-sklearn" / "datamanager.pkl" - - if not dm_path.exists(): - datamanager = make_sklearn_dataset( - name="breast_cancer", - task=BINARY_CLASSIFICATION, - feat_type="numerical", # They're all numerical - as_datamanager=True, - ) - - # For some reason, the old mock was just returning this array as: - # - # datamanger.data.get.return_value = array - # - model_3_path = TOY_DATA / ".auto-sklearn" / "runs" / "0_3_100.0" - test_preds = model_3_path / "predictions_test_0_3_100.0.npy" - array = np.load(test_preds) - - datamanager.data["Y_valid"] = array - datamanager.data["Y_test"] = array - - with dm_path.open("wb") as f: - pickle.dump(datamanager, f) - - return make_backend(path=path, template=TOY_DATA) + original_backend = automl._backend + backend_path = tmp_path / "backend" + + backend = make_backend(path=backend_path, template=original_backend) + + ensemble_dir = Path(backend.get_ensemble_dir()) + rmtree(ensemble_dir) + + ensemble_hist = Path(backend.internals_directory) / "ensemble_history.json" + ensemble_hist.unlink() + + return backend diff --git a/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model b/test/test_ensemble_builder/test_3_models/__init__.py similarity index 100% rename from test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model rename to test/test_ensemble_builder/test_3_models/__init__.py diff --git a/test/test_ensemble_builder/test_3_models/cases.py b/test/test_ensemble_builder/test_3_models/cases.py new file mode 100644 index 0000000000..f6fb826f73 --- /dev/null +++ b/test/test_ensemble_builder/test_3_models/cases.py @@ -0,0 +1,100 @@ +"""See the contents of TOY_DATA for full details + +/data +└── .auto-sklearn + ├── runs + │ ├── 0_1_0.0 + │ │ ├── 0.1.0.0.model + │ │ ├── predictions_ensemble_0_1_0.0.npy + │ │ ├── predictions_test_0_1_0.0.npy + │ │ └── predictions_valid_0_1_0.0.npy + │ ├── 0_2_0.0 + │ │ ├── 0.2.0.0.model + │ │ ├── predictions_ensemble_0_2_0.0.npy + │ │ ├── predictions_test_0_2_0.0.np + │ │ ├── predictions_test_0_2_0.0.npy + │ │ └── predictions_valid_0_2_0.0.npy + │ └── 0_3_100.0 + │ ├── 0.3.0.0.model + │ ├── 0.3.100.0.model + │ ├── predictions_ensemble_0_3_100.0.npy + │ ├── predictions_test_0_3_100.0.npy + │ └── predictions_valid_0_3_100.0.npy + ├── datamanager.pkl + ├── true_targets_ensemble.npy + └── predictions_ensemble_true.npy + +# Ensemble targets and predictions +Both `predictions_ensemble_targets` and `true_targets_ensemble` are the same set of data +* [ [1, 0], [0, 1], [0, 1], [0, 1], [0, 1], ] + +# 0_1_0.0 +All of run 0_1_0.0's predictions for "ensemble" "test" and "valid" are differing by +their predictions in the first key. +* [ [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], ] + +# 0_2_0.0, 0_3_100.0 +All of run 0_2_0.0's predictions for "ensemble" "test" and "valid" are exactly the same +as the `true_targets_ensemble` and `predictions_ensemble_true` +* [ [1, 0], [0, 1], [0, 1], [0, 1], [0, 1], ] + +# Models +The models are empty files. + +# Datamanager +The datamanager contains the iris dataset as the above numbers are made up with no +real corresponding models so the data from the datamanager can not be faked so easily. 
+""" + +from typing import Callable + +import pickle +from pathlib import Path + +import numpy as np + +from autosklearn.automl_common.common.utils.backend import Backend +from autosklearn.constants import BINARY_CLASSIFICATION +from autosklearn.data.xy_data_manager import XYDataManager + +from pytest_cases import case + +HERE = Path(__file__).parent.resolve() +DATADIR = HERE / "data" + + +@case +def case_3_models( + tmp_path: Path, + make_backend: Callable[..., Backend], + make_sklearn_dataset: Callable[..., XYDataManager], +) -> Backend: + """Gives the backend for the this certain setup""" + path = tmp_path / "backend" + + # Create the datamanager that was used if needed + dm_path = DATADIR / ".auto-sklearn" / "datamanager.pkl" + + if not dm_path.exists(): + datamanager = make_sklearn_dataset( + name="breast_cancer", + task=BINARY_CLASSIFICATION, + feat_type="numerical", # They're all numerical + as_datamanager=True, + ) + + # For some reason, the old mock was just returning this array as: + # + # datamanger.data.get.return_value = array + # + model_3_path = DATADIR / ".auto-sklearn" / "runs" / "0_3_100.0" + test_preds = model_3_path / "predictions_test_0_3_100.0.npy" + array = np.load(test_preds) + + datamanager.data["Y_valid"] = array + datamanager.data["Y_test"] = array + + with dm_path.open("wb") as f: + pickle.dump(datamanager, f) + + return make_backend(path=path, template=DATADIR) diff --git a/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model similarity index 100% rename from test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model rename to test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model diff --git a/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model similarity index 100% rename from test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model rename to test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/test_ensemble_builder/test_3_models/test_3_models.py b/test/test_ensemble_builder/test_3_models/test_3_models.py new file mode 100644 index 0000000000..5c1a35436c --- /dev/null +++ b/test/test_ensemble_builder/test_3_models/test_3_models.py @@ -0,0 +1,874 @@ +from __future__ import annotations + +from typing import Callable + +import os +import pickle +import time +from pathlib import Path + +import dask.distributed +import numpy as np + +from autosklearn.automl_common.common.utils.backend import Backend +from autosklearn.constants import BINARY_CLASSIFICATION +from autosklearn.ensemble_builder import ( + Y_ENSEMBLE, + Y_TEST, + Y_VALID, + EnsembleBuilder, + EnsembleBuilderManager, +) +from autosklearn.metrics import make_scorer, roc_auc + +from pytest_cases import parametrize, parametrize_with_cases +from unittest.mock import Mock, 
patch + +import test.test_ensemble_builder.test_3_models.cases as cases +from test.conftest import DEFAULT_SEED +from test.fixtures.logging import MockLogger + + +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_read(ensemble_backend: Backend) -> None: + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ) + + success = ensbuilder.compute_loss_per_model() + assert success, f"read_preds = {str(ensbuilder.read_preds)}" + + assert len(ensbuilder.read_preds) == 3, ensbuilder.read_preds.keys() + assert len(ensbuilder.read_losses) == 3, ensbuilder.read_losses.keys() + + runsdir = Path(ensemble_backend.get_runs_directory()) + preds_1 = runsdir / "predictions_ensemble_0_1_0.0.npy" + preds_2 = runsdir / "predictions_ensemble_0_2_0.0.npy" + + assert ensbuilder.read_losses[str(preds_1)]["ens_loss"] == 0.5 + assert ensbuilder.read_losses[str(preds_2)]["ens_loss"] == 0.0 + + +@parametrize( + "ensemble_nbest, max_models_on_disc, expected", + ( + (1, None, 1), + (1.0, None, 2), + (0.1, None, 1), + (0.9, None, 1), + (1, 2, 1), + (2, 1, 1), + ), +) +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_nbest( + ensemble_backend: Backend, + ensemble_nbest: int | float, + max_models_on_disc: int | None, + expected: int, +) -> None: + """ + Parameters + ---------- + ensemble_backend: Backend + The backend to use. In this case, we specifically rely on the `setup_3_models` + setup. + + ensemble_nbest: int | float + The number of best models to consider, an int being an absolute count and a + float being a fraction. + + max_models_on_disc: int | None + The maximum amount of models to keep on disk + + expected: int + The number of keys expected to be selected + + Expects + ------- + * get_n_best_preds should return the expected number of keys + * The first key should be model 0_2_0.0 + """ + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=ensemble_nbest, + max_models_on_disc=max_models_on_disc, + ) + + ensbuilder.compute_loss_per_model() + sel_keys = ensbuilder.get_n_best_preds() + + assert len(sel_keys) == expected + + expected_sel = os.path.join( + ensemble_backend.temporary_directory, + ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", + ) + assert sel_keys[0] == expected_sel + + +@parametrize( + "max_models_on_disc, expected", + [ + # If None, no reduction + (None, 2), + # If Int, limit only on exceed + (4, 2), + (1, 1), + # If Float, translate float to # models. + # below, mock of each file is 100 Mb and 4 files .model and .npy (test/val/pred) + #
Now, it takes 500MB for run 3 and + # another 500 MB of slack because we keep as much space as the largest model + # available as slack + (1499.0, 1), + (1500.0, 2), + (9999.0, 2), + ], +) +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_max_models_on_disc( + ensemble_backend: Backend, + max_models_on_disc: int | float, + expected: int, +) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The backend to use, relies on setup_3_models + + max_models_on_disc : int | float + The max_models_on_disc param to use + + expected : int + The expected number of selected models + + Expects + ------- + * The number of selected models should be as expected + """ + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=0, # important to find the test files + ensemble_nbest=4, + max_models_on_disc=max_models_on_disc, + ) + + with patch("os.path.getsize") as mock: + mock.return_value = 100 * 1024 * 1024 + ensbuilder.compute_loss_per_model() + sel_keys = ensbuilder.get_n_best_preds() + assert len(sel_keys) == expected + + +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_max_models_on_disc_2(ensemble_backend: Backend) -> None: + # Test for Extreme scenarios + # Make sure that the best predictions are kept + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=50, + max_models_on_disc=10000.0, + ) + ensbuilder.read_preds = {} + + for n in range(50): + loss = 10 * -n + ensbuilder.read_losses["pred" + str(n)] = { + "ens_loss": loss, + "num_run": n, + "loaded": 1, + "seed": 0, + "disc_space_cost_mb": 50 * n, + } + ensbuilder.read_preds["pred" + str(n)] = {Y_ENSEMBLE: True} + + sel_keys = ensbuilder.get_n_best_preds() + assert ["pred49", "pred48", "pred47"] == sel_keys + + +@parametrize("n_models", [50, 10, 2, 1]) +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_max_models_on_disc_preserves_always_preserves_at_least_one_model( + n_models: int, + ensemble_backend: Backend, +) -> None: + """ + Parameters + ---------- + n_models : int + + ensemble_backend : Backend + + """ + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=50, + max_models_on_disc=0.0, + ) + + read_losses = { + f"pred{n}": { + "ens_loss": 10 * -n, + "num_run": n + 1, + "loaded": 1, + "seed": 0, + "disc_space_cost_mb": 50 * n, + } + for n in range(n_models) + } + best_model = min(read_losses, key=lambda m: read_losses[m]["ens_loss"]) + + ensbuilder.read_losses = read_losses + ensbuilder.read_preds = {f"pred{n}": {Y_ENSEMBLE: True} for n in range(n_models)} + + sel_keys = ensbuilder.get_n_best_preds() + assert [best_model] == sel_keys + + +@parametrize( + "performance_range_threshold, expected_selected", + ((0.0, 4), (0.1, 4), (0.3, 3), (0.5, 2), (0.6, 2), (0.8, 1), (1.0, 1)), +) +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_performance_range_threshold( + ensemble_backend: Backend, + performance_range_threshold: float, + expected_selected: int, +) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The backend to use + + 
performance_range_threshold : float + The performance range threshold to use + + expected_selected : int + The number of models expected to be selected + + Expects + ------- + * The given number of models should be selected for the given performance range + threshold. + """ + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=100, + performance_range_threshold=performance_range_threshold, + ) + ensbuilder.read_losses = { + "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": 1}, + "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": 1}, + "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": 1}, + "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": 1}, + "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": 1}, + } + ensbuilder.read_preds = { + name: {preds_key: True for preds_key in (Y_ENSEMBLE, Y_VALID, Y_TEST)} + for name in ensbuilder.read_losses + } + + sel_keys = ensbuilder.get_n_best_preds() + assert len(sel_keys) == expected_selected + + +@parametrize( + "performance_range_threshold, ensemble_nbest, expected_selected", + ( + (0.0, 1, 1), + (0.0, 1.0, 4), + (0.1, 2, 2), + (0.3, 4, 3), + (0.5, 1, 1), + (0.6, 10, 2), + (0.8, 0.5, 1), + (1, 1.0, 1), + ), +) +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_performance_range_threshold_with_ensemble_nbest( + ensemble_backend: Backend, + performance_range_threshold: float, + ensemble_nbest: int | float, + expected_selected: int, +) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + performance_range_threshold : float + ensemble_nbest : int | float + expected_selected : int + The number of models expected to be selected + + Expects + ------- + * Given the setup of params for test_performance_range_threshold and ensemble_nbest, + the expected number of models should be selected.
+ """ + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=ensemble_nbest, + performance_range_threshold=performance_range_threshold, + max_models_on_disc=None, + ) + ensbuilder.read_losses = { + "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": 1}, + "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": 1}, + "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": 1}, + "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": 1}, + "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": 1}, + } + ensbuilder.read_preds = { + name: {pred_name: True for pred_name in (Y_ENSEMBLE, Y_VALID, Y_TEST)} + for name in ensbuilder.read_losses + } + sel_keys = ensbuilder.get_n_best_preds() + + assert len(sel_keys) == expected_selected + + +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_fall_back_nbest(ensemble_backend: Backend) -> None: + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=1, + ) + + ensbuilder.compute_loss_per_model() + print() + print(ensbuilder.read_preds.keys()) + print(ensbuilder.read_losses.keys()) + print(ensemble_backend.temporary_directory) + + for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"]: + filename = os.path.join( + ensemble_backend.temporary_directory, + f".auto-sklearn/runs/{model}/predictions_ensemble_{model}.npy", + ) + ensbuilder.read_losses[filename]["ens_loss"] = -1 + + sel_keys = ensbuilder.get_n_best_preds() + + best_model = "0_1_0.0" + expected = os.path.join( + ensemble_backend.temporary_directory, + f".auto-sklearn/runs/{best_model}/predictions_ensemble_{best_model}.npy", + ) + + assert len(sel_keys) == 1 + assert sel_keys[0] == expected + + +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_get_valid_test_preds(ensemble_backend: Backend) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The ensemble backend to use with the setup_3_models setup + """ + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=1, + ) + + # There are 3 models in the setup + # * Run 1 is the dummy run + # * Run 2 and Run 3 share the same predictions + # -> Run 2 is selected with ensemble_nbest = 1 + paths = [ + os.path.join( + ensemble_backend.temporary_directory, + f".auto-sklearn/runs/{model}/predictions_ensemble_{model}.npy", + ) + for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"] + ] + + ensbuilder.compute_loss_per_model() + + sel_keys = ensbuilder.get_n_best_preds() + assert len(sel_keys) == 1 + + ensbuilder.get_valid_test_preds(selected_keys=sel_keys) + + # Number of read files should be three and contain those of the models in the setup + assert set(ensbuilder.read_preds.keys()) == set(paths) + + selected = sel_keys + non_selected = set(paths) - set(sel_keys) + + # not selected --> should still be None + for key in non_selected: + assert ensbuilder.read_preds[key][Y_VALID] is None + assert ensbuilder.read_preds[key][Y_TEST] is None + + # selected --> read valid and test predictions + for key in selected: + assert ensbuilder.read_preds[key][Y_VALID] is not None + assert 
ensbuilder.read_preds[key][Y_TEST] is not None + + +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The ensemble backend to use with the setup_3_models setup + + Expects + ------- + * The validation and test sets should both have equal predictions for them + * Since model 0_2_0.0 has predictions exactly equal to the targets, it should + receive full weight and the predictions should be identical to that model's + predictions + """ + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=2, + ) + ensbuilder.SAVE2DISC = False + + ensbuilder.compute_loss_per_model() + + d2 = os.path.join( + ensemble_backend.temporary_directory, + ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", + ) + + sel_keys = ensbuilder.get_n_best_preds() + assert len(sel_keys) > 0 + + ensemble = ensbuilder.fit_ensemble(selected_keys=sel_keys) + print(ensemble, sel_keys) + + n_sel_valid, n_sel_test = ensbuilder.get_valid_test_preds(selected_keys=sel_keys) + + # both valid and test prediction files are available + assert len(n_sel_valid) > 0 + assert n_sel_valid == n_sel_test + + y_valid = ensbuilder.predict( + set_="valid", + ensemble=ensemble, + selected_keys=n_sel_valid, + n_preds=len(sel_keys), + index_run=1, + ) + y_test = ensbuilder.predict( + set_="test", + ensemble=ensemble, + selected_keys=n_sel_test, + n_preds=len(sel_keys), + index_run=1, + ) + + # predictions for valid and test are the same + # --> should result in the same predictions + np.testing.assert_array_almost_equal(y_valid, y_test) + + # since d2 provides perfect predictions + # it should get a higher weight + # so that y_valid should be exactly y_valid_d2 + y_valid_d2 = ensbuilder.read_preds[d2][Y_VALID][:, 1] + np.testing.assert_array_almost_equal(y_valid, y_valid_d2) + + +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_main(ensemble_backend: Backend) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The ensemble_backend to use, this test relies on this specific case + + Expects + ------- + * There should be "read_preds" and "read_losses" saved to file + * There should be 3 model reads + * There should be a hash for the preds read in + * The true targets should have been read in + * The length of the history returned by run should be the same as the iterations + performed. + * The run history should contain "optimization", "val" and "test" scores, each being + the same at 1.0 due to the setup of "setup_3_models".
+ """ + iters = 1 + + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ) + + run_history, ensemble_nbest, _, _, _ = ensbuilder.main( + time_left=np.inf, + iteration=iters, + return_predictions=False, + ) + + internals_dir = Path(ensemble_backend.internals_directory) + read_preds_path = internals_dir / "ensemble_read_preds.pkl" + read_losses_path = internals_dir / "ensemble_read_losses.pkl" + + assert read_preds_path.exists(), list(internals_dir.iterdir()) + assert read_losses_path.exists(), list(internals_dir.iterdir()) + + # There should be three preds read + assert len(ensbuilder.read_preds) == 3 + assert ensbuilder.last_hash is not None + assert ensbuilder.y_true_ensemble is not None + + # We expect as many iterations as the iters param + assert len(run_history) == iters + hist_item = run_history[0] + + # As the data loader loads the same val/train/test + # we expect 1.0 as score and all keys available + expected_performance = { + "ensemble_val_score": 1.0, + "ensemble_test_score": 1.0, + "ensemble_optimization_score": 1.0, + } + + assert all(key in hist_item for key in expected_performance) + assert all(hist_item[key] == score for key, score in expected_performance.items()) + assert "Timestamp" in hist_item + + +@parametrize("time_buffer", [1, 5]) +@parametrize("duration", [10, 20]) +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_run_end_at(ensemble_backend: Backend, time_buffer: int, duration: int) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + The backend to use + + time_buffer: int + How much time buffer to give to the ensemble builder + + duration: int + How long to run the ensemble builder for + + Expects + ------- + * The limits enforced by pynisher should account for the time_buffer and duration + to run for + a little bit of overhead that gets rounded to a second. + """ + with patch("pynisher.enforce_limits") as pynisher_mock: + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + ) + + ensbuilder.run( + end_at=time.time() + duration, + iteration=1, + time_buffer=time_buffer, + pynisher_context="forkserver", + ) + + # The 1 comes from the small overhead in conjuction with rounding down + expected = duration - time_buffer - 1 + assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"] == expected + + +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_limit( + ensemble_backend: Backend, + mock_logger: MockLogger, +) -> None: + """ + + Parameters + ---------- + ensemble_backend : Backend + The backend setup to use + + Fixtures + -------- + mock_logger: MockLogger + A logger to inject into the EnsembleBuilder for tracking calls + + Expects + ------- + * Running from (ensemble_nbest, read_at_most) = (10, 5) where a memory exception + occurs in each run, we expect ensemble_nbest to be halved continuously until + it reaches 0, at which point read_at_most is reduced directly to 1. 
+ """ + expected_states = [(10, 5), (5, 5), (2, 5), (1, 5), (0, 1)] + + starting_state = expected_states[0] + intermediate_states = expected_states[1:-1] + final_state = expected_states[-1] + + starting_nbest, starting_read_at_most = starting_state + + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=starting_nbest, + read_at_most=starting_read_at_most, + memory_limit=1, + ) + + # Force a memory error to occur + ensbuilder.predict = Mock(side_effect=MemoryError) # type: ignore + ensbuilder.logger = mock_logger # Mock its logger + ensbuilder.SAVE2DISC = False + + internal_dir = Path(ensemble_backend.internals_directory) + read_losses_file = internal_dir / "ensemble_read_losses.pkl" + read_preds_file = internal_dir / "ensemble_read_preds.pkl" + + def mtime_mock(filename: str) -> float: + """TODO, not really sure why we have to force these""" + path = Path(filename) + mtimes = { + # At second 0 + "predictions_ensemble_0_1_0.0.npy": 0.0, + "predictions_valid_0_1_0.0.npy": 0.1, + "predictions_test_0_1_0.0.npy": 0.2, + # At second 1 + "predictions_ensemble_0_2_0.0.npy": 1.0, + "predictions_valid_0_2_0.0.npy": 1.1, + "predictions_test_0_2_0.0.npy": 1.2, + # At second 2 + "predictions_ensemble_0_3_100.0.npy": 2.0, + "predictions_valid_0_3_100.0.npy": 2.1, + "predictions_test_0_3_100.0.npy": 2.2, + } + return mtimes[path.name] + + with patch("os.path.getmtime") as mtime: + mtime.side_effect = mtime_mock + + starting_state = (starting_nbest, starting_read_at_most) + assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == starting_state + + intermediate_states = [(5, 5), (2, 5), (1, 5), (0, 1)] + for i, exp_state in enumerate(intermediate_states, start=1): + ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") + + assert read_losses_file.exists() + assert not read_preds_file.exists() + + assert mock_logger.warning.call_count == i # type: ignore + + assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == exp_state + + # At this point, when we've reached (ensemble_nbest, read_at_most) = (0, 1), + # we can still run the ensbulder but it should just raise an error and not + # change it's internal state + ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") + + assert read_losses_file.exists() + assert not read_preds_file.exists() + + assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == final_state + + warning_call_count = mock_logger.warning.call_count # type: ignore + error_call_count = mock_logger.error.call_count # type: ignore + + assert warning_call_count == len(intermediate_states) + assert error_call_count == 1 + + for call_arg in mock_logger.error.call_args_list: # type: ignore + assert "Memory Exception -- Unable to further reduce" in str(call_arg) + + +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_read_pickle_read_preds(ensemble_backend: Backend) -> None: + """ + Parameters + ---------- + ensemble_backend : Backend + THe ensemble backend to use + + Expects + ------- + * The read_losses and read_preds should be cached between creation of + the EnsembleBuilder. 
+ """ + ensbuilder = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=DEFAULT_SEED, # important to find the test files + ensemble_nbest=2, + max_models_on_disc=None, + ) + ensbuilder.SAVE2DISC = False + + ensbuilder.main(time_left=np.inf, iteration=1, return_predictions=False) + + # Check that the memory was created + internal_dir = Path(ensemble_backend.internals_directory) + losses_file = internal_dir / "ensemble_read_losses.pkl" + memory_file = internal_dir / "ensemble_read_preds.pkl" + + assert memory_file.exists() + + # Make sure we pickle the correct read preads and hash + with memory_file.open("rb") as memory: + read_preds, last_hash = pickle.load(memory) + + def assert_equal_read_preds(a: dict, b: dict) -> None: + """ + * Keys are check to be the same at each depth + * Any ndarray as check for equality with numpy + * Everything else is checked with regular equality + """ + # Both arrays should have the same splits + assert set(a.keys()) == set(b.keys()) + + for k in a.keys(): + if isinstance(a[k], dict): + assert_equal_read_preds(a[k], b[k]) + elif isinstance(a[k], np.ndarray): + np.testing.assert_array_equal(a[k], b[k]) + else: + assert a[k] == b[k], f"Key: {k}" + + assert_equal_read_preds(read_preds, ensbuilder.read_preds) + assert last_hash == ensbuilder.last_hash + + assert losses_file.exists() + + # Make sure we pickle the correct read scores + with losses_file.open("rb") as memory: + read_losses = pickle.load(memory) + + assert_equal_read_preds(read_losses, ensbuilder.read_losses) + + # Then create a new instance, which should automatically read this file + ensbuilder2 = EnsembleBuilder( + backend=ensemble_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + seed=0, # important to find the test files + ensemble_nbest=2, + max_models_on_disc=None, + ) + assert_equal_read_preds(ensbuilder2.read_preds, ensbuilder.read_preds) + assert_equal_read_preds(ensbuilder2.read_losses, ensbuilder.read_losses) + assert ensbuilder2.last_hash == ensbuilder.last_hash + + +@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +def test_ensemble_builder_process_realrun( + ensemble_backend: Backend, + make_dask_client: Callable[..., dask.distributed.Client], +) -> None: + """ + + Parameters + ---------- + ensemble_backend : Backend + The backend to use, doesn't really matter which kind + + Fixtures + -------- + make_dask_client : Callable[..., [dask.distributed.Client]] + + Expects + ------- + * With 1 iteration, the history should only be of length one + * The expected ensmble score keys for "optimization", "valid" and "test" should + be in the one history item. 
+ * The "Timestamp" key should be in the history item + * With a metric that always returns 0.9, each ensemble score should be 0.9 in the + history item + """ + dask_client = make_dask_client(n_workers=1) + mock_metric = make_scorer("mock", lambda x, y: 0.9) + iterations = 1 + + manager = EnsembleBuilderManager( + start_time=time.time(), + time_left_for_ensembles=1000, + backend=ensemble_backend, + dataset_name="Test", + task=BINARY_CLASSIFICATION, + metric=mock_metric, + ensemble_size=50, + ensemble_nbest=10, + max_models_on_disc=None, + seed=DEFAULT_SEED, + precision=32, + max_iterations=iterations, + read_at_most=np.inf, + ensemble_memory_limit=None, + random_state=0, + ) + manager.build_ensemble(dask_client) + future = manager.futures.pop() + dask.distributed.wait([future]) # wait for the ensemble process to finish + + result = future.result() + history, _, _, _, _ = result + + assert len(history) == iterations + + hist_item = history[0] + + expected_scores = { + f"ensemble_{key}_score": 0.9 for key in ["optimization", "val", "test"] + } + + assert "Timestamp" in hist_item + assert all(key in hist_item for key in expected_scores) + assert all(hist_item[key] == expected_scores[key] for key in expected_scores) diff --git a/test/test_ensemble_builder/test_ensemble.py b/test/test_ensemble_builder/test_ensemble.py index 012e110ce6..db4a3a09b3 100644 --- a/test/test_ensemble_builder/test_ensemble.py +++ b/test/test_ensemble_builder/test_ensemble.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Callable, Tuple +from typing import Callable import os import pickle @@ -9,12 +9,7 @@ import dask.distributed import numpy as np -import pandas as pd -from smac.runhistory.runhistory import RunHistory, RunKey, RunValue -from autosklearn.automl_common.common.ensemble_building.abstract_ensemble import ( - AbstractEnsemble, -) from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.ensemble_builder import ( @@ -24,44 +19,14 @@ EnsembleBuilder, EnsembleBuilderManager, ) -from autosklearn.ensembles.singlebest_ensemble import SingleBest -from autosklearn.metrics import Scorer, accuracy, log_loss, make_scorer, roc_auc +from autosklearn.metrics import make_scorer, roc_auc from pytest_cases import parametrize, parametrize_with_cases from unittest.mock import Mock, patch import test.test_ensemble_builder.cases as cases -from test.fixtures.logging import MockLogger from test.conftest import DEFAULT_SEED - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_read(ensemble_backend: Backend) -> None: - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ) - - success = ensbuilder.compute_loss_per_model() - assert success, f"read_preds = {str(ensbuilder.read_preds)}" - - assert len(ensbuilder.read_preds) == 3, ensbuilder.read_preds.keys() - assert len(ensbuilder.read_losses) == 3, ensbuilder.read_losses.keys() - - filename = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy", - ) - assert ensbuilder.read_losses[filename]["ens_loss"] == 0.5 - - filename = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", - ) - assert ensbuilder.read_losses[filename]["ens_loss"] == 0.0 +from 
test.fixtures.logging import MockLogger @parametrize( @@ -119,11 +84,11 @@ def test_nbest( assert len(sel_keys) == expected - expected = os.path.join( + expected_sel = os.path.join( ensemble_backend.temporary_directory, ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", ) - assert sel_keys[0] == expected + assert sel_keys[0] == expected_sel @parametrize( @@ -563,8 +528,8 @@ def test_main(ensemble_backend: Backend) -> None: ) internals_dir = Path(ensemble_backend.internals_directory) - read_preds_path = (internals_dir / "ensemble_read_preds.pkl") - read_losses_path = (internals_dir / "ensemble_read_losses.pkl") + read_preds_path = internals_dir / "ensemble_read_preds.pkl" + read_losses_path = internals_dir / "ensemble_read_losses.pkl" assert read_preds_path.exists(), list(internals_dir.iterdir()) assert read_losses_path.exists(), list(internals_dir.iterdir()) @@ -674,7 +639,8 @@ def test_limit( memory_limit=1, ) - ensbuilder.predict = Mock(side_effect=MemoryError) # Force a memory error + # Force a memory error to occur + ensbuilder.predict = Mock(side_effect=MemoryError) # type: ignore ensbuilder.logger = mock_logger # Mock its logger ensbuilder.SAVE2DISC = False @@ -714,7 +680,7 @@ def mtime_mock(filename: str) -> float: assert read_losses_file.exists() assert not read_preds_file.exists() - assert mock_logger.warning.call_count == i + assert mock_logger.warning.call_count == i # type: ignore assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == exp_state @@ -728,10 +694,13 @@ def mtime_mock(filename: str) -> float: assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == final_state - assert mock_logger.warning.call_count == len(intermediate_states) - assert mock_logger.error.call_count == 1, mock_logger.error.call_args_list + warning_call_count = mock_logger.warning.call_count # type: ignore + error_call_count = mock_logger.error.call_count # type: ignore + + assert warning_call_count == len(intermediate_states) + assert error_call_count == 1 - for call_arg in mock_logger.error.call_args_list: + for call_arg in mock_logger.error.call_args_list: # type: ignore assert "Memory Exception -- Unable to further reduce" in str(call_arg) @@ -818,7 +787,7 @@ def assert_equal_read_preds(a: dict, b: dict) -> None: @parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) def test_ensemble_builder_process_realrun( ensemble_backend: Backend, - make_dask_client: Callable[..., [dask.distributed.Client]], + make_dask_client: Callable[..., dask.distributed.Client], ) -> None: """ @@ -879,68 +848,3 @@ def test_ensemble_builder_process_realrun( assert "Timestamp" in hist_item assert all(key in hist_item for key in expected_scores) assert all(hist_item[key] == expected_scores[key] for key in expected_scores) - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_ensemble_builder_nbest_remembered( - ensemble_backend: Backend, - make_dask_client: Callable[..., [dask.distributed.Client]], -) -> None: - """ - Parameters - ---------- - ensemble_backend: Backend - The backend to use, relies on the 3 setup models - - Fixtures - -------- - make_dask_client: (...) 
-> Client - Make a dask client - - Expects - ------- - * The read_preds file should not be created - * The ensemble_nbest should be remembered and reduced between runs - TODO Note sure why there would be a reduction and how these numbers were made - - Last Note - --------- - "Makes sure ensemble builder returns the size of the ensemble that pynisher allowed - This way, we can remember it and not waste more time trying big ensemble sizes" - """ - dask_client = make_dask_client(n_workers=1) - - manager = EnsembleBuilderManager( - start_time=time.time(), - time_left_for_ensembles=1000, - backend=ensemble_backend, - dataset_name="Test", - task=BINARY_CLASSIFICATION, - metric=roc_auc, - ensemble_size=50, - ensemble_nbest=10, - max_models_on_disc=None, - seed=0, - precision=32, - read_at_most=np.inf, - ensemble_memory_limit=1000, - random_state=0, - max_iterations=None, - ) - - filepath = Path(ensemble_backend.internals_directory) / "ensemble_read_preds.pkl" - - manager.build_ensemble(dask_client, unit_test=True) - future = manager.futures[0] - dask.distributed.wait([future]) # wait for the ensemble process to finish - - assert future.result() == ([], 5, None, None, None) - - assert not filepath.exists() - - manager.build_ensemble(dask_client, unit_test=True) - future = manager.futures[0] - dask.distributed.wait([future]) # wait for the ensemble process to finish - - assert not filepath.exists() - assert future.result() == ([], 2, None, None, None) diff --git a/test/test_ensemble_builder/test_ensemble_manager.py b/test/test_ensemble_builder/test_ensemble_manager.py new file mode 100644 index 0000000000..e3b7296ba5 --- /dev/null +++ b/test/test_ensemble_builder/test_ensemble_manager.py @@ -0,0 +1,81 @@ +from typing import Callable + +import time +from pathlib import Path + +import dask.distributed +import numpy as np + +from autosklearn.automl_common.common.utils.backend import Backend +from autosklearn.constants import BINARY_CLASSIFICATION +from autosklearn.ensemble_builder import EnsembleBuilderManager +from autosklearn.metrics import roc_auc + +from pytest_cases import parametrize_with_cases + +import test.test_ensemble_builder.cases as cases + + +@parametrize_with_cases("ensemble_backend", cases=cases) +def test_ensemble_builder_nbest_remembered( + ensemble_backend: Backend, + make_dask_client: Callable[..., [dask.distributed.Client]], +) -> None: + """ + Parameters + ---------- + ensemble_backend: Backend + The backend to use, relies on the 3 setup models + + Fixtures + -------- + make_dask_client: (...) 
-> Client + Make a dask client + + Expects + ------- + * The read_preds file should not be created + * The ensemble_nbest should be remembered and reduced between runs + TODO Note sure why there would be a reduction and how these numbers were made + + Last Note + --------- + "Makes sure ensemble builder returns the size of the ensemble that pynisher allowed + This way, we can remember it and not waste more time trying big ensemble sizes" + """ + dask_client = make_dask_client(n_workers=1) + + manager = EnsembleBuilderManager( + start_time=time.time(), + time_left_for_ensembles=1000, + backend=ensemble_backend, + dataset_name="Test", + task=BINARY_CLASSIFICATION, + metric=roc_auc, + ensemble_size=50, + ensemble_nbest=10, + max_models_on_disc=None, + seed=0, + precision=32, + read_at_most=np.inf, + ensemble_memory_limit=1000, + random_state=0, + max_iterations=None, + ) + + filepath = Path(ensemble_backend.internals_directory) / "ensemble_read_preds.pkl" + + manager.build_ensemble(dask_client, unit_test=True) + future = manager.futures[0] + dask.distributed.wait([future]) # wait for the ensemble process to finish + + assert future.result() == ([], 5, None, None, None) + + assert not filepath.exists() + + manager.build_ensemble(dask_client, unit_test=True) + future = manager.futures[0] + dask.distributed.wait([future]) # wait for the ensemble process to finish + + assert not filepath.exists() + assert future.result() == ([], 2, None, None, None) diff --git a/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np b/test/test_ensemble_builder/toy_data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g From a20150c2ca99eab21726ec36768404672347288d Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 18:12:21 +0200 Subject: [PATCH 006/117] Readd missing file which failed test for `case_3_models` --- .../test_3_models/cases.py | 4 + .../runs/0_2_0.0/predictions_test_0_2_0.0.np | Bin 0 -> 160 bytes .../test_3_models/test_3_models.py | 403 +----------------- test/test_ensemble_builder/test_ensemble.py | 63 --- 4 files changed, 17 insertions(+), 453 deletions(-) create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np diff --git a/test/test_ensemble_builder/test_3_models/cases.py b/test/test_ensemble_builder/test_3_models/cases.py index f6fb826f73..3a103e1059 100644 --- a/test/test_ensemble_builder/test_3_models/cases.py +++ b/test/test_ensemble_builder/test_3_models/cases.py @@ -44,6 +44,10 @@ # Datamanager The datamanager contains the iris dataset as the above numbers are made up with no real corresponding models so the data from the datamanager can not be faked so easily. + +# Extra Notes +The extra `predictions_test_0_2_0.0.np` are required to make `test_max_models_on_disc` +pass as it factors into the memory estimation. Should probably fix that. 
""" from typing import Callable diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np new file mode 100644 index 0000000000000000000000000000000000000000..fee3160c86d8995cb5ece8126aae88f13a964629 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/test_3_models.py b/test/test_ensemble_builder/test_3_models/test_3_models.py index 5c1a35436c..2940edfeab 100644 --- a/test/test_ensemble_builder/test_3_models/test_3_models.py +++ b/test/test_ensemble_builder/test_3_models/test_3_models.py @@ -1,25 +1,19 @@ from __future__ import annotations -from typing import Callable import os -import pickle -import time from pathlib import Path -import dask.distributed import numpy as np from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.ensemble_builder import ( - Y_ENSEMBLE, Y_TEST, Y_VALID, EnsembleBuilder, - EnsembleBuilderManager, ) -from autosklearn.metrics import make_scorer, roc_auc +from autosklearn.metrics import roc_auc from pytest_cases import parametrize, parametrize_with_cases from unittest.mock import Mock, patch @@ -29,7 +23,7 @@ from test.fixtures.logging import MockLogger -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +@parametrize_with_cases("ensemble_backend", cases=cases) def test_read(ensemble_backend: Backend) -> None: ensbuilder = EnsembleBuilder( backend=ensemble_backend, @@ -46,11 +40,13 @@ def test_read(ensemble_backend: Backend) -> None: assert len(ensbuilder.read_losses) == 3, ensbuilder.read_losses.keys() runsdir = Path(ensemble_backend.get_runs_directory()) - preds_1 = runsdir / "predictions_ensemble_0_1_0.0.npy" - preds_2 = runsdir / "predictions_ensemble_0_2_0.0.npy" + preds_1 = runsdir / "0_1_0.0" / "predictions_ensemble_0_1_0.0.npy" + preds_2 = runsdir / "0_2_0.0" / "predictions_ensemble_0_2_0.0.npy" + preds_3 = runsdir / "0_3_100.0" / "predictions_ensemble_0_3_100.0.npy" assert ensbuilder.read_losses[str(preds_1)]["ens_loss"] == 0.5 assert ensbuilder.read_losses[str(preds_2)]["ens_loss"] == 0.0 + assert ensbuilder.read_losses[str(preds_3)]["ens_loss"] == 0.0 @parametrize( @@ -64,7 +60,7 @@ def test_read(ensemble_backend: Backend) -> None: (2, 1, 1), ), ) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +@parametrize_with_cases("ensemble_backend", cases=cases) def test_nbest( ensemble_backend: Backend, ensemble_nbest: int | float, @@ -133,7 +129,7 @@ def test_nbest( (9999.0, 2), ], ) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +@parametrize_with_cases("ensemble_backend", cases=cases) def test_max_models_on_disc( ensemble_backend: Backend, max_models_on_disc: int | float, @@ -172,192 +168,7 @@ def test_max_models_on_disc( assert len(sel_keys) == expected -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_max_models_on_disc_2(ensemble_backend: Backend) -> None: - # Test for Extreme scenarios - # Make sure that the best predictions are kept - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to 
find the test files - ensemble_nbest=50, - max_models_on_disc=10000.0, - ) - ensbuilder.read_preds = {} - - for n in range(50): - loss = 10 * -n - ensbuilder.read_losses["pred" + str(n)] = { - "ens_loss": loss, - "num_run": n, - "loaded": 1, - "seed": 0, - "disc_space_cost_mb": 50 * n, - } - ensbuilder.read_preds["pred" + str(n)] = {Y_ENSEMBLE: True} - - sel_keys = ensbuilder.get_n_best_preds() - assert ["pred49", "pred48", "pred47"] == sel_keys - - -@parametrize("n_models", [50, 10, 2, 1]) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_max_models_on_disc_preserves_always_preserves_at_least_one_model( - n_models: int, - ensemble_backend: Backend, -) -> None: - """ - Parameters - ---------- - n_models : int - - ensemble_backend : Backend - - """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=50, - max_models_on_disc=0.0, - ) - - read_losses = { - f"pred{n}": { - "ens_loss": 10 * -n, - "num_run": n + 1, - "loaded": 1, - "seed": 0, - "disc_space_cost_mb": 50 * n, - } - for n in range(n_models) - } - best_model = min(read_losses, key=lambda m: read_losses[m]["ens_loss"]) - - ensbuilder.read_losses = read_losses - ensbuilder.read_preds = {f"pred{n}": {Y_ENSEMBLE: True} for n in range(n_models)} - - sel_keys = ensbuilder.get_n_best_preds() - assert [best_model] == sel_keys - - -@parametrize( - "performance_range_threshold, expected_selected", - ((0.0, 4), (0.1, 4), (0.3, 3), (0.5, 2), (0.6, 2), (0.8, 1), (1.0, 1)), -) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_performance_range_threshold( - ensemble_backend: Backend, - performance_range_threshold: float, - expected_selected: int, -) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - The backend to use - - performance_range_threshold : float - THe performance range threshold to use - - expected_selected : int - The number of selected models for there to be - - Expects - ------- - * Expects the given amount of models to be selected given a performance range - threshold. 
- """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=100, - performance_range_threshold=performance_range_threshold, - ) - ensbuilder.read_losses = { - "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": 1}, - "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": 1}, - "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": 1}, - "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": 1}, - "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": 1}, - } - ensbuilder.read_preds = { - name: {preds_key: True for preds_key in (Y_ENSEMBLE, Y_VALID, Y_TEST)} - for name in ensbuilder.read_losses - } - - sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) == expected_selected - - -@parametrize( - "performance_range_threshold, ensemble_nbest, expected_selected", - ( - (0.0, 1, 1), - (0.0, 1.0, 4), - (0.1, 2, 2), - (0.3, 4, 3), - (0.5, 1, 1), - (0.6, 10, 2), - (0.8, 0.5, 1), - (1, 1.0, 1), - ), -) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_performance_range_threshold_with_ensemble_nbest( - ensemble_backend: Backend, - performance_range_threshold: float, - ensemble_nbest: int | float, - expected_selected: int, -) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - performance_range_threshold : float - ensemble_nbest : int | float - expected_selected : int - The number of models expected to be selected - - Expects - ------- - * Given the setup of params for test_performance_range_threshold and ensemble_nbest, - the expected number of models should be selected. - """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=ensemble_nbest, - performance_range_threshold=performance_range_threshold, - max_models_on_disc=None, - ) - ensbuilder.read_losses = { - "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": 1}, - "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": 1}, - "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": 1}, - "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": 1}, - "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": 1}, - } - ensbuilder.read_preds = { - name: {pred_name: True for pred_name in (Y_ENSEMBLE, Y_VALID, Y_TEST)} - for name in ensbuilder.read_losses - } - sel_keys = ensbuilder.get_n_best_preds() - - assert len(sel_keys) == expected_selected - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +@parametrize_with_cases("ensemble_backend", cases=cases) def test_fall_back_nbest(ensemble_backend: Backend) -> None: ensbuilder = EnsembleBuilder( backend=ensemble_backend, @@ -393,7 +204,7 @@ def test_fall_back_nbest(ensemble_backend: Backend) -> None: assert sel_keys[0] == expected -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +@parametrize_with_cases("ensemble_backend", cases=cases) def test_get_valid_test_preds(ensemble_backend: Backend) -> None: """ Parameters @@ -446,7 +257,7 @@ def test_get_valid_test_preds(ensemble_backend: Backend) -> None: assert ensbuilder.read_preds[key][Y_TEST] is not None -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +@parametrize_with_cases("ensemble_backend", cases=cases) def 
test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: """ Parameters @@ -516,7 +327,7 @@ def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: np.testing.assert_array_almost_equal(y_valid, y_valid_d2) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +@parametrize_with_cases("ensemble_backend", cases=cases) def test_main(ensemble_backend: Backend) -> None: """ Parameters @@ -580,48 +391,7 @@ def test_main(ensemble_backend: Backend) -> None: assert "Timestamp" in hist_item -@parametrize("time_buffer", [1, 5]) -@parametrize("duration", [10, 20]) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_run_end_at(ensemble_backend: Backend, time_buffer: int, duration: int) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - The backend to use - - time_buffer: int - How much time buffer to give to the ensemble builder - - duration: int - How long to run the ensemble builder for - - Expects - ------- - * The limits enforced by pynisher should account for the time_buffer and duration - to run for + a little bit of overhead that gets rounded to a second. - """ - with patch("pynisher.enforce_limits") as pynisher_mock: - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - ) - - ensbuilder.run( - end_at=time.time() + duration, - iteration=1, - time_buffer=time_buffer, - pynisher_context="forkserver", - ) - - # The 1 comes from the small overhead in conjuction with rounding down - expected = duration - time_buffer - 1 - assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"] == expected - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) +@parametrize_with_cases("ensemble_backend", cases=cases) def test_limit( ensemble_backend: Backend, mock_logger: MockLogger, @@ -666,7 +436,6 @@ def test_limit( # Force a memory error to occur ensbuilder.predict = Mock(side_effect=MemoryError) # type: ignore ensbuilder.logger = mock_logger # Mock its logger - ensbuilder.SAVE2DISC = False internal_dir = Path(ensemble_backend.internals_directory) read_losses_file = internal_dir / "ensemble_read_losses.pkl" @@ -726,149 +495,3 @@ def mtime_mock(filename: str) -> float: for call_arg in mock_logger.error.call_args_list: # type: ignore assert "Memory Exception -- Unable to further reduce" in str(call_arg) - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_read_pickle_read_preds(ensemble_backend: Backend) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - THe ensemble backend to use - - Expects - ------- - * The read_losses and read_preds should be cached between creation of - the EnsembleBuilder. 
- """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=2, - max_models_on_disc=None, - ) - ensbuilder.SAVE2DISC = False - - ensbuilder.main(time_left=np.inf, iteration=1, return_predictions=False) - - # Check that the memory was created - internal_dir = Path(ensemble_backend.internals_directory) - losses_file = internal_dir / "ensemble_read_losses.pkl" - memory_file = internal_dir / "ensemble_read_preds.pkl" - - assert memory_file.exists() - - # Make sure we pickle the correct read preads and hash - with memory_file.open("rb") as memory: - read_preds, last_hash = pickle.load(memory) - - def assert_equal_read_preds(a: dict, b: dict) -> None: - """ - * Keys are check to be the same at each depth - * Any ndarray as check for equality with numpy - * Everything else is checked with regular equality - """ - # Both arrays should have the same splits - assert set(a.keys()) == set(b.keys()) - - for k in a.keys(): - if isinstance(a[k], dict): - assert_equal_read_preds(a[k], b[k]) - elif isinstance(a[k], np.ndarray): - np.testing.assert_array_equal(a[k], b[k]) - else: - assert a[k] == b[k], f"Key: {k}" - - assert_equal_read_preds(read_preds, ensbuilder.read_preds) - assert last_hash == ensbuilder.last_hash - - assert losses_file.exists() - - # Make sure we pickle the correct read scores - with losses_file.open("rb") as memory: - read_losses = pickle.load(memory) - - assert_equal_read_preds(read_losses, ensbuilder.read_losses) - - # Then create a new instance, which should automatically read this file - ensbuilder2 = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=0, # important to find the test files - ensemble_nbest=2, - max_models_on_disc=None, - ) - assert_equal_read_preds(ensbuilder2.read_preds, ensbuilder.read_preds) - assert_equal_read_preds(ensbuilder2.read_losses, ensbuilder.read_losses) - assert ensbuilder2.last_hash == ensbuilder.last_hash - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_ensemble_builder_process_realrun( - ensemble_backend: Backend, - make_dask_client: Callable[..., dask.distributed.Client], -) -> None: - """ - - Parameters - ---------- - ensemble_backend : Backend - The backend to use, doesn't really matter which kind - - Fixtures - -------- - make_dask_client : Callable[..., [dask.distributed.Client]] - - Expects - ------- - * With 1 iteration, the history should only be of length one - * The expected ensmble score keys for "optimization", "valid" and "test" should - be in the one history item. 
- * The "Timestamp" key should be in the history item - * With a metric that always returns 0.9, each ensemble score should be 0.9 in the - history item - """ - dask_client = make_dask_client(n_workers=1) - mock_metric = make_scorer("mock", lambda x, y: 0.9) - iterations = 1 - - manager = EnsembleBuilderManager( - start_time=time.time(), - time_left_for_ensembles=1000, - backend=ensemble_backend, - dataset_name="Test", - task=BINARY_CLASSIFICATION, - metric=mock_metric, - ensemble_size=50, - ensemble_nbest=10, - max_models_on_disc=None, - seed=DEFAULT_SEED, - precision=32, - max_iterations=iterations, - read_at_most=np.inf, - ensemble_memory_limit=None, - random_state=0, - ) - manager.build_ensemble(dask_client) - future = manager.futures.pop() - dask.distributed.wait([future]) # wait for the ensemble process to finish - - result = future.result() - history, _, _, _, _ = result - - assert len(history) == iterations - - hist_item = history[0] - - expected_scores = { - f"ensemble_{key}_score": 0.9 for key in ["optimization", "val", "test"] - } - - assert "Timestamp" in hist_item - assert all(key in hist_item for key in expected_scores) - assert all(hist_item[key] == expected_scores[key] for key in expected_scores) diff --git a/test/test_ensemble_builder/test_ensemble.py b/test/test_ensemble_builder/test_ensemble.py index db4a3a09b3..6cc350dcd9 100644 --- a/test/test_ensemble_builder/test_ensemble.py +++ b/test/test_ensemble_builder/test_ensemble.py @@ -28,69 +28,6 @@ from test.conftest import DEFAULT_SEED from test.fixtures.logging import MockLogger - -@parametrize( - "ensemble_nbest, max_models_on_disc, expected", - ( - (1, None, 1), - (1.0, None, 2), - (0.1, None, 1), - (0.9, None, 1), - (1, 2, 1), - (2, 1, 1), - ), -) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_nbest( - ensemble_backend: Backend, - ensemble_nbest: int | float, - max_models_on_disc: int | None, - expected: int, -) -> None: - """ - Parameters - ---------- - ensemble_backend: Backend - The backend to use. In this case, we specifically rely on the `setup_3_models` - setup. - - ensemble_nbest: int | float - The parameter to use for consider the n best, int being absolute and float being - fraction. 
- - max_models_on_disc: int | None - The maximum amount of models to keep on disk - - expected: int - The number of keys expected to be selected - - Expects - ------- - * get_n_best_preds should contain 2 keys - * The first key should be model 0_2_0_0 - """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=ensemble_nbest, - max_models_on_disc=max_models_on_disc, - ) - - ensbuilder.compute_loss_per_model() - sel_keys = ensbuilder.get_n_best_preds() - - assert len(sel_keys) == expected - - expected_sel = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", - ) - assert sel_keys[0] == expected_sel - - @parametrize( "max_models_on_disc, expected", [ From 84d01e7e31a4ca75a3d28e987c2acfe9e34264c5 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 19:50:33 +0200 Subject: [PATCH 007/117] Seperate out tests that rely on old toy data and those that don't --- test/fixtures/ensemble_builder.py | 0 test/test_ensemble_builder/cases.py | 2 +- test/test_ensemble_builder/test_ensemble.py | 730 ++++---------------- 3 files changed, 154 insertions(+), 578 deletions(-) delete mode 100644 test/fixtures/ensemble_builder.py diff --git a/test/fixtures/ensemble_builder.py b/test/fixtures/ensemble_builder.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/test_ensemble_builder/cases.py b/test/test_ensemble_builder/cases.py index 9b69510d92..94ab708210 100644 --- a/test/test_ensemble_builder/cases.py +++ b/test/test_ensemble_builder/cases.py @@ -11,7 +11,7 @@ import test.test_automl.cases as cases -@case +@case(tags=["real"]) @parametrize_with_cases("automl", cases=cases, has_tag="fitted") def case_fitted_automl( tmp_path: Path, diff --git a/test/test_ensemble_builder/test_ensemble.py b/test/test_ensemble_builder/test_ensemble.py index 6cc350dcd9..63bae11ad9 100644 --- a/test/test_ensemble_builder/test_ensemble.py +++ b/test/test_ensemble_builder/test_ensemble.py @@ -12,6 +12,7 @@ from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION +from autosklearn.data.xy_data_manager import XYDataManager from autosklearn.ensemble_builder import ( Y_ENSEMBLE, Y_TEST, @@ -21,122 +22,169 @@ ) from autosklearn.metrics import make_scorer, roc_auc -from pytest_cases import parametrize, parametrize_with_cases +from pytest_cases import fixture, parametrize, parametrize_with_cases from unittest.mock import Mock, patch import test.test_ensemble_builder.cases as cases from test.conftest import DEFAULT_SEED from test.fixtures.logging import MockLogger -@parametrize( - "max_models_on_disc, expected", - [ - # If None, no reduction - (None, 2), - # If Int, limit only on exceed - (4, 2), - (1, 1), - # If Float, translate float to # models. - # below, mock of each file is 100 Mb and 4 files .model and .npy (test/val/pred) - # per run (except for run3, there they are 5). 
Now, it takes 500MB for run 3 and - # another 500 MB of slack because we keep as much space as the largest model - # available as slack - (1499.0, 1), - (1500.0, 2), - (9999.0, 2), - ], -) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_max_models_on_disc( - ensemble_backend: Backend, - max_models_on_disc: int | float, - expected: int, + +@fixture +def dummy_backend( + tmp_path: Path, + make_sklearn_dataset: Callable[..., XYDataManager], + make_backend: Callable[..., Backend], +) -> Backend: + datamanager = make_sklearn_dataset( + name="breast_cancer", + task=BINARY_CLASSIFICATION, + feat_type="numerical", # They're all numerical + as_datamanager=True, + ) + backend = make_backend(path=tmp_path / "backend") + backend.save_datamanager(datamanager) + return backend + + +@parametrize("n_models", [20, 50]) +@parametrize("mem_model", [1, 10, 100, 1000]) +@parametrize("mem_largest_mult", [1, 2, 10]) +@parametrize("n_expected", [1, 3, 5, 10]) +@parametrize("largest_is_best", [True, False]) +def test_max_models_on_disc_with_float_selects_expected_models( + n_models: int, + mem_model: int, + mem_largest_mult: int, + n_expected: int, + largest_is_best: bool, + dummy_backend: Backend ) -> None: """ Parameters ---------- - ensemble_backend : Backend - The backend to use, relies on setup_3_models + n_models : int + The number of models to have + + mem_model : int + The memory consumption per model + + mem_largest_mult : int + How much the largest model takes (mem_largest = mem_per_model * mult) + + n_expected : int + How many models we expect the EnsembleBuilder to save + + largest_is_best: bool + Whether to include the largest model as one of the best models or as the worst. + + Fixtures + -------- + dummy_backend: Backend + Just a backend that's valid, contents don't matter for this test - max_models_on_disc : int | float - The max_models_on_disc param to use + Note + ---- + We use the parameters here to calculate the `max_models_on_disc` arg to verify + that with that calculation, we do indeed select that many models. - expected : int - The expected number of selected models + mem_nbest = ... memory of the n best models + max_models_on_disc = float(mem_nbest + mem_largest_model) + + This is a bit backwards to calculate max_models_on_disc from what we expect but + it is much easier and still verifies behaviour. Expects ------- - * The number of selected models should be as expected + * The ensemble builder should select the expected number of models given the + calculated `max_models_on_disc`.
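+ + Example + ------- + A purely illustrative walk-through of the calculation above, using one of the parametrized combinations and assuming the largest model is also the best: with mem_model=10, mem_largest_mult=2 and n_expected=3, the three best models cost 20 + 10 + 10 = 40MB, the slack is the size of the largest model (20MB), so max_models_on_disc = float(40 + 20) = 60.0, under which exactly 3 models should be selected.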
""" - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=0, # important to find the test files - ensemble_nbest=4, - max_models_on_disc=max_models_on_disc, - ) - with patch("os.path.getsize") as mock: - mock.return_value = 100 * 1024 * 1024 - ensbuilder.compute_loss_per_model() - sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) == expected + # These are arranged so the last one is best, with the lose loss + losses = [ + { + "ens_loss": 10 * -n, + "num_run": n, + "loaded": 1, + "seed": DEFAULT_SEED, + "disc_space_cost_mb": mem_model, + } + for n in range(1, n_models + 1) + ] + mem_largest = mem_model * mem_largest_mult + if largest_is_best: + losses[-1]["disc_space_cost_mb"] = mem_largest + else: + losses[0]["disc_space_cost_mb"] = mem_largest + + nbest = sorted(losses, key=lambda item: item["ens_loss"])[:n_expected] + mem_for_nbest = sum(item["disc_space_cost_mb"] for item in nbest) + + slack = mem_largest # Slack introduced is the size of the largest model + max_models_on_disc = float(mem_for_nbest + slack) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_max_models_on_disc_2(ensemble_backend: Backend) -> None: - # Test for Extreme scenarios - # Make sure that the best predictions are kept ensbuilder = EnsembleBuilder( - backend=ensemble_backend, + backend=dummy_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=50, - max_models_on_disc=10000.0, + max_models_on_disc=max_models_on_disc, + memory_limit=None, ) - ensbuilder.read_preds = {} - for n in range(50): - loss = 10 * -n - ensbuilder.read_losses["pred" + str(n)] = { - "ens_loss": loss, - "num_run": n, - "loaded": 1, - "seed": 0, - "disc_space_cost_mb": 50 * n, - } - ensbuilder.read_preds["pred" + str(n)] = {Y_ENSEMBLE: True} + # Enter the models, with each model being progressibly better + ensbuilder.read_losses = {f"pred{i}": v for i, v in enumerate(losses, start=1)} + # Make the last model twice as large + if largest_is_best: + ensbuilder.read_losses[f"pred{n_models}"]["disc_space_cost_mb"] = mem_largest + else: + ensbuilder.read_losses["pred1"]["disc_space_cost_mb"] = mem_largest + + ensbuilder.read_preds = { + f"pred{n}": {Y_ENSEMBLE: True} for n in range(1, n_models + 1) + } sel_keys = ensbuilder.get_n_best_preds() - assert ["pred49", "pred48", "pred47"] == sel_keys + + # The last expected_to_save models should be saved, the range iters backwards + expected = [f"pred{n}" for n in range(n_models, n_models - n_expected, -1)] + + assert len(sel_keys) == len(expected) and sel_keys == expected @parametrize("n_models", [50, 10, 2, 1]) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_max_models_on_disc_preserves_always_preserves_at_least_one_model( +def test_max_models_on_disc_float_always_preserves_best_model( n_models: int, - ensemble_backend: Backend, + dummy_backend: Backend ) -> None: """ Parameters ---------- n_models : int + The number of models to start with - ensemble_backend : Backend + Fixtures + -------- + dummy_backend: Backend + Just a valid backend, contents don't matter for this test + Expects + ------- + * The best model should always be selected even if the memory assigned for models + on disc does not allow for any models. This is because we need at least one. 
""" + max_models_on_disc = 0.0 + ensbuilder = EnsembleBuilder( - backend=ensemble_backend, + backend=dummy_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=50, - max_models_on_disc=0.0, + max_models_on_disc=max_models_on_disc, + memory_limit=None, ) read_losses = { @@ -144,7 +192,7 @@ def test_max_models_on_disc_preserves_always_preserves_at_least_one_model( "ens_loss": 10 * -n, "num_run": n + 1, "loaded": 1, - "seed": 0, + "seed": DEFAULT_SEED, "disc_space_cost_mb": 50 * n, } for n in range(n_models) @@ -162,44 +210,44 @@ def test_max_models_on_disc_preserves_always_preserves_at_least_one_model( "performance_range_threshold, expected_selected", ((0.0, 4), (0.1, 4), (0.3, 3), (0.5, 2), (0.6, 2), (0.8, 1), (1.0, 1)), ) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) def test_performance_range_threshold( - ensemble_backend: Backend, performance_range_threshold: float, expected_selected: int, + dummy_backend: Backend ) -> None: """ Parameters ---------- - ensemble_backend : Backend - The backend to use - performance_range_threshold : float THe performance range threshold to use expected_selected : int The number of selected models for there to be + Fixtures + -------- + dummy_backend: Backend + A valid backend whose contents don't matter for this test + Expects ------- * Expects the given amount of models to be selected given a performance range threshold. """ ensbuilder = EnsembleBuilder( - backend=ensemble_backend, + backend=dummy_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=100, + seed=DEFAULT_SEED, performance_range_threshold=performance_range_threshold, ) ensbuilder.read_losses = { - "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": 1}, - "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": 1}, - "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": 1}, - "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": 1}, - "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": 1}, + "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": DEFAULT_SEED}, + "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": DEFAULT_SEED}, + "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": DEFAULT_SEED}, + "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": DEFAULT_SEED}, + "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": DEFAULT_SEED}, } ensbuilder.read_preds = { name: {preds_key: True for preds_key in (Y_ENSEMBLE, Y_VALID, Y_TEST)} @@ -223,43 +271,46 @@ def test_performance_range_threshold( (1, 1.0, 1), ), ) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) def test_performance_range_threshold_with_ensemble_nbest( - ensemble_backend: Backend, performance_range_threshold: float, ensemble_nbest: int | float, expected_selected: int, + dummy_backend: Backend, ) -> None: """ Parameters ---------- - ensemble_backend : Backend performance_range_threshold : float ensemble_nbest : int | float expected_selected : int The number of models expected to be selected + Fixtures + -------- + dummy_backend: Backend + A backend whose contents are valid and don't matter for this test + Expects ------- * Given the setup of params for test_performance_range_threshold and ensemble_nbest, the expected number of models should be selected. 
""" ensbuilder = EnsembleBuilder( - backend=ensemble_backend, + backend=dummy_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files + seed=DEFAULT_SEED, ensemble_nbest=ensemble_nbest, performance_range_threshold=performance_range_threshold, max_models_on_disc=None, ) ensbuilder.read_losses = { - "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": 1}, - "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": 1}, - "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": 1}, - "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": 1}, - "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": 1}, + "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": DEFAULT_SEED}, + "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": DEFAULT_SEED}, + "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": DEFAULT_SEED}, + "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": DEFAULT_SEED}, + "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": DEFAULT_SEED}, } ensbuilder.read_preds = { name: {pred_name: True for pred_name in (Y_ENSEMBLE, Y_VALID, Y_TEST)} @@ -270,245 +321,23 @@ def test_performance_range_threshold_with_ensemble_nbest( assert len(sel_keys) == expected_selected -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_fall_back_nbest(ensemble_backend: Backend) -> None: - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=1, - ) - - ensbuilder.compute_loss_per_model() - print() - print(ensbuilder.read_preds.keys()) - print(ensbuilder.read_losses.keys()) - print(ensemble_backend.temporary_directory) - - for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"]: - filename = os.path.join( - ensemble_backend.temporary_directory, - f".auto-sklearn/runs/{model}/predictions_ensemble_{model}.npy", - ) - ensbuilder.read_losses[filename]["ens_loss"] = -1 - - sel_keys = ensbuilder.get_n_best_preds() - - best_model = "0_1_0.0" - expected = os.path.join( - ensemble_backend.temporary_directory, - f".auto-sklearn/runs/{best_model}/predictions_ensemble_{best_model}.npy", - ) - - assert len(sel_keys) == 1 - assert sel_keys[0] == expected - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_get_valid_test_preds(ensemble_backend: Backend) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - The ensemble backend to use with the setup_3_models setup - """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=1, - ) - - # There are 3 models in the setup - # * Run 1 is the dummy run - # * Run 2 and Run 3 share the same predictions - # -> Run 2 is selected with ensemble_nbest = 1 - paths = [ - os.path.join( - ensemble_backend.temporary_directory, - f".auto-sklearn/runs/{model}/predictions_ensemble_{model}.npy", - ) - for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"] - ] - - ensbuilder.compute_loss_per_model() - - sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) == 1 - - ensbuilder.get_valid_test_preds(selected_keys=sel_keys) - - # Number of read files should be three and contain those of the models in the setup - assert set(ensbuilder.read_preds.keys()) == set(paths) - - selected = sel_keys 
- non_selected = set(paths) - set(sel_keys) - - # not selected --> should still be None - for key in non_selected: - assert ensbuilder.read_preds[key][Y_VALID] is None - assert ensbuilder.read_preds[key][Y_TEST] is None - - # selected --> read valid and test predictions - for key in selected: - assert ensbuilder.read_preds[key][Y_VALID] is not None - assert ensbuilder.read_preds[key][Y_TEST] is not None - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - The ensemble backend to use with the setup_3_models setup - - Expects - ------- - * The validation and test sets should both have equal predictions for them? - * Since model 0_2_0.0 has predictions exactly equal to the targets, it should - recieve full weight and that the predictions should be identical to that models - predictions - """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=2, - ) - ensbuilder.SAVE2DISC = False - - ensbuilder.compute_loss_per_model() - - d2 = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", - ) - - sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) > 0 - - ensemble = ensbuilder.fit_ensemble(selected_keys=sel_keys) - print(ensemble, sel_keys) - - n_sel_valid, n_sel_test = ensbuilder.get_valid_test_preds(selected_keys=sel_keys) - - # both valid and test prediction files are available - assert len(n_sel_valid) > 0 - assert n_sel_valid == n_sel_test - - y_valid = ensbuilder.predict( - set_="valid", - ensemble=ensemble, - selected_keys=n_sel_valid, - n_preds=len(sel_keys), - index_run=1, - ) - y_test = ensbuilder.predict( - set_="test", - ensemble=ensemble, - selected_keys=n_sel_test, - n_preds=len(sel_keys), - index_run=1, - ) - - # predictions for valid and test are the same - # --> should results in the same predictions - np.testing.assert_array_almost_equal(y_valid, y_test) - - # since d2 provides perfect predictions - # it should get a higher weight - # so that y_valid should be exactly y_valid_d2 - y_valid_d2 = ensbuilder.read_preds[d2][Y_VALID][:, 1] - np.testing.assert_array_almost_equal(y_valid, y_valid_d2) - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_main(ensemble_backend: Backend) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - The ensemble_backend to use, this test relies on this specific case - - Expects - ------- - * There should be "read_preds" and "read_losses" saved to file - * There should be 3 model reads - * There should be a hash for the preds read in - * The true targets should have been read in - * The length of the history returned by run should be the same as the iterations - performed. - * The run history should contain "optimization", "val" and "test" scores, each being - the same at 1.0 due to the setup of "setup_3_models". 
- """ - iters = 1 - - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ) - - run_history, ensemble_nbest, _, _, _ = ensbuilder.main( - time_left=np.inf, - iteration=iters, - return_predictions=False, - ) - - internals_dir = Path(ensemble_backend.internals_directory) - read_preds_path = internals_dir / "ensemble_read_preds.pkl" - read_losses_path = internals_dir / "ensemble_read_losses.pkl" - - assert read_preds_path.exists(), list(internals_dir.iterdir()) - assert read_losses_path.exists(), list(internals_dir.iterdir()) - - # There should be three preds read - assert len(ensbuilder.read_preds) == 3 - assert ensbuilder.last_hash is not None - assert ensbuilder.y_true_ensemble is not None - - # We expect as many iterations as the iters param - assert len(run_history) == iters - hist_item = run_history[0] - - # As the data loader loads the same val/train/test - # we expect 1.0 as score and all keys available - expected_performance = { - "ensemble_val_score": 1.0, - "ensemble_test_score": 1.0, - "ensemble_optimization_score": 1.0, - } - - assert all(key in hist_item for key in expected_performance) - assert all(hist_item[key] == score for key, score in expected_performance.items()) - assert "Timestamp" in hist_item - - @parametrize("time_buffer", [1, 5]) @parametrize("duration", [10, 20]) -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_run_end_at(ensemble_backend: Backend, time_buffer: int, duration: int) -> None: +def test_run_end_at(dummy_backend: Backend, time_buffer: int, duration: int) -> None: """ Parameters ---------- - ensemble_backend : Backend - The backend to use - time_buffer: int How much time buffer to give to the ensemble builder duration: int How long to run the ensemble builder for + Fixtures + -------- + dummy_backend: Backend + A valid backend whose contents don't matter for this test + Expects ------- * The limits enforced by pynisher should account for the time_buffer and duration @@ -516,7 +345,7 @@ def test_run_end_at(ensemble_backend: Backend, time_buffer: int, duration: int) """ with patch("pynisher.enforce_limits") as pynisher_mock: ensbuilder = EnsembleBuilder( - backend=ensemble_backend, + backend=dummy_backend, dataset_name="TEST", task_type=BINARY_CLASSIFICATION, metric=roc_auc, @@ -532,256 +361,3 @@ def test_run_end_at(ensemble_backend: Backend, time_buffer: int, duration: int) # The 1 comes from the small overhead in conjuction with rounding down expected = duration - time_buffer - 1 assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"] == expected - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_limit( - ensemble_backend: Backend, - mock_logger: MockLogger, -) -> None: - """ - - Parameters - ---------- - ensemble_backend : Backend - The backend setup to use - - Fixtures - -------- - mock_logger: MockLogger - A logger to inject into the EnsembleBuilder for tracking calls - - Expects - ------- - * Running from (ensemble_nbest, read_at_most) = (10, 5) where a memory exception - occurs in each run, we expect ensemble_nbest to be halved continuously until - it reaches 0, at which point read_at_most is reduced directly to 1. 
- """ - expected_states = [(10, 5), (5, 5), (2, 5), (1, 5), (0, 1)] - - starting_state = expected_states[0] - intermediate_states = expected_states[1:-1] - final_state = expected_states[-1] - - starting_nbest, starting_read_at_most = starting_state - - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=starting_nbest, - read_at_most=starting_read_at_most, - memory_limit=1, - ) - - # Force a memory error to occur - ensbuilder.predict = Mock(side_effect=MemoryError) # type: ignore - ensbuilder.logger = mock_logger # Mock its logger - ensbuilder.SAVE2DISC = False - - internal_dir = Path(ensemble_backend.internals_directory) - read_losses_file = internal_dir / "ensemble_read_losses.pkl" - read_preds_file = internal_dir / "ensemble_read_preds.pkl" - - def mtime_mock(filename: str) -> float: - """TODO, not really sure why we have to force these""" - path = Path(filename) - mtimes = { - # At second 0 - "predictions_ensemble_0_1_0.0.npy": 0.0, - "predictions_valid_0_1_0.0.npy": 0.1, - "predictions_test_0_1_0.0.npy": 0.2, - # At second 1 - "predictions_ensemble_0_2_0.0.npy": 1.0, - "predictions_valid_0_2_0.0.npy": 1.1, - "predictions_test_0_2_0.0.npy": 1.2, - # At second 2 - "predictions_ensemble_0_3_100.0.npy": 2.0, - "predictions_valid_0_3_100.0.npy": 2.1, - "predictions_test_0_3_100.0.npy": 2.2, - } - return mtimes[path.name] - - with patch("os.path.getmtime") as mtime: - mtime.side_effect = mtime_mock - - starting_state = (starting_nbest, starting_read_at_most) - assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == starting_state - - intermediate_states = [(5, 5), (2, 5), (1, 5), (0, 1)] - for i, exp_state in enumerate(intermediate_states, start=1): - ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - - assert read_losses_file.exists() - assert not read_preds_file.exists() - - assert mock_logger.warning.call_count == i # type: ignore - - assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == exp_state - - # At this point, when we've reached (ensemble_nbest, read_at_most) = (0, 1), - # we can still run the ensbulder but it should just raise an error and not - # change it's internal state - ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - - assert read_losses_file.exists() - assert not read_preds_file.exists() - - assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == final_state - - warning_call_count = mock_logger.warning.call_count # type: ignore - error_call_count = mock_logger.error.call_count # type: ignore - - assert warning_call_count == len(intermediate_states) - assert error_call_count == 1 - - for call_arg in mock_logger.error.call_args_list: # type: ignore - assert "Memory Exception -- Unable to further reduce" in str(call_arg) - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_read_pickle_read_preds(ensemble_backend: Backend) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - THe ensemble backend to use - - Expects - ------- - * The read_losses and read_preds should be cached between creation of - the EnsembleBuilder. 
- """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=2, - max_models_on_disc=None, - ) - ensbuilder.SAVE2DISC = False - - ensbuilder.main(time_left=np.inf, iteration=1, return_predictions=False) - - # Check that the memory was created - internal_dir = Path(ensemble_backend.internals_directory) - losses_file = internal_dir / "ensemble_read_losses.pkl" - memory_file = internal_dir / "ensemble_read_preds.pkl" - - assert memory_file.exists() - - # Make sure we pickle the correct read preads and hash - with memory_file.open("rb") as memory: - read_preds, last_hash = pickle.load(memory) - - def assert_equal_read_preds(a: dict, b: dict) -> None: - """ - * Keys are check to be the same at each depth - * Any ndarray as check for equality with numpy - * Everything else is checked with regular equality - """ - # Both arrays should have the same splits - assert set(a.keys()) == set(b.keys()) - - for k in a.keys(): - if isinstance(a[k], dict): - assert_equal_read_preds(a[k], b[k]) - elif isinstance(a[k], np.ndarray): - np.testing.assert_array_equal(a[k], b[k]) - else: - assert a[k] == b[k], f"Key: {k}" - - assert_equal_read_preds(read_preds, ensbuilder.read_preds) - assert last_hash == ensbuilder.last_hash - - assert losses_file.exists() - - # Make sure we pickle the correct read scores - with losses_file.open("rb") as memory: - read_losses = pickle.load(memory) - - assert_equal_read_preds(read_losses, ensbuilder.read_losses) - - # Then create a new instance, which should automatically read this file - ensbuilder2 = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=0, # important to find the test files - ensemble_nbest=2, - max_models_on_disc=None, - ) - assert_equal_read_preds(ensbuilder2.read_preds, ensbuilder.read_preds) - assert_equal_read_preds(ensbuilder2.read_losses, ensbuilder.read_losses) - assert ensbuilder2.last_hash == ensbuilder.last_hash - - -@parametrize_with_cases("ensemble_backend", cases=cases, has_tag=["setup_3_models"]) -def test_ensemble_builder_process_realrun( - ensemble_backend: Backend, - make_dask_client: Callable[..., dask.distributed.Client], -) -> None: - """ - - Parameters - ---------- - ensemble_backend : Backend - The backend to use, doesn't really matter which kind - - Fixtures - -------- - make_dask_client : Callable[..., [dask.distributed.Client]] - - Expects - ------- - * With 1 iteration, the history should only be of length one - * The expected ensmble score keys for "optimization", "valid" and "test" should - be in the one history item. 
- * The "Timestamp" key should be in the history item - * With a metric that always returns 0.9, each ensemble score should be 0.9 in the - history item - """ - dask_client = make_dask_client(n_workers=1) - mock_metric = make_scorer("mock", lambda x, y: 0.9) - iterations = 1 - - manager = EnsembleBuilderManager( - start_time=time.time(), - time_left_for_ensembles=1000, - backend=ensemble_backend, - dataset_name="Test", - task=BINARY_CLASSIFICATION, - metric=mock_metric, - ensemble_size=50, - ensemble_nbest=10, - max_models_on_disc=None, - seed=DEFAULT_SEED, - precision=32, - max_iterations=iterations, - read_at_most=np.inf, - ensemble_memory_limit=None, - random_state=0, - ) - manager.build_ensemble(dask_client) - future = manager.futures.pop() - dask.distributed.wait([future]) # wait for the ensemble process to finish - - result = future.result() - history, _, _, _, _ = result - - assert len(history) == iterations - - hist_item = history[0] - - expected_scores = { - f"ensemble_{key}_score": 0.9 for key in ["optimization", "val", "test"] - } - - assert "Timestamp" in hist_item - assert all(key in hist_item for key in expected_scores) - assert all(hist_item[key] == expected_scores[key] for key in expected_scores) From fcf6ad0aea61ae5bc2d33d80dc308225b8540ad7 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 20:27:28 +0200 Subject: [PATCH 008/117] Setup test framework for ensemble builder on real situations --- setup.py | 2 +- test/test_automl/cases.py | 11 +-- ...t_ensemble.py => test_ensemble_builder.py} | 13 +-- ...cases.py => test_ensemble_builder_real.py} | 18 +++-- .../test_ensemble_manager.py | 81 ------------------- 5 files changed, 22 insertions(+), 103 deletions(-) rename test/test_ensemble_builder/{test_ensemble.py => test_ensemble_builder.py} (96%) rename test/test_ensemble_builder/{cases.py => test_ensemble_builder_real.py} (70%) delete mode 100644 test/test_ensemble_builder/test_ensemble_manager.py diff --git a/setup.py b/setup.py index e182cd716b..84af24be33 100644 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ "pytest-cov", "pytest-xdist", "pytest-timeout", - "pytest-cases", + "pytest-cases>=3.6.11", "mypy", "isort", "black", diff --git a/test/test_automl/cases.py b/test/test_automl/cases.py index 70d68c4b73..74a22a20a8 100644 --- a/test/test_automl/cases.py +++ b/test/test_automl/cases.py @@ -14,6 +14,7 @@ {fitted} - If the automl case has been fitted {cv, holdout} - Whether explicitly cv or holdout was used {no_ensemble} - Fit with no ensemble size + {cached} - If the resulting case is then cached """ from typing import Callable, Tuple @@ -53,7 +54,7 @@ def case_regressor( # ################################### # The following are fitted and cached # ################################### -@case(tags=["classifier", "fitted", "holdout"]) +@case(tags=["classifier", "fitted", "holdout", "cached"]) @parametrize("dataset", ["iris"]) def case_classifier_fitted_holdout( automl_cache: Callable[[str], AutoMLCache], @@ -83,7 +84,7 @@ def case_classifier_fitted_holdout( return model -@case(tags=["classifier", "fitted", "cv"]) +@case(tags=["classifier", "fitted", "cv", "cached"]) @parametrize("dataset", ["iris"]) def case_classifier_fitted_cv( automl_cache: Callable[[str], AutoMLCache], @@ -111,7 +112,7 @@ def case_classifier_fitted_cv( return model -@case(tags=["regressor", "fitted", "holdout"]) +@case(tags=["regressor", "fitted", "holdout", "cached"]) @parametrize("dataset", ["boston"]) def case_regressor_fitted_holdout( automl_cache: Callable[[str], AutoMLCache], @@ 
-139,7 +140,7 @@ def case_regressor_fitted_holdout( return model -@case(tags=["regressor", "fitted", "cv"]) +@case(tags=["regressor", "fitted", "cv", "cached"]) @parametrize("dataset", ["boston"]) def case_regressor_fitted_cv( automl_cache: Callable[[str], AutoMLCache], @@ -168,7 +169,7 @@ def case_regressor_fitted_cv( return model -@case(tags=["classifier", "fitted", "no_ensemble"]) +@case(tags=["classifier", "fitted", "no_ensemble", "cached"]) @parametrize("dataset", ["iris"]) def case_classifier_fitted_no_ensemble( automl_cache: Callable[[str], AutoMLCache], diff --git a/test/test_ensemble_builder/test_ensemble.py b/test/test_ensemble_builder/test_ensemble_builder.py similarity index 96% rename from test/test_ensemble_builder/test_ensemble.py rename to test/test_ensemble_builder/test_ensemble_builder.py index 63bae11ad9..83fea24d22 100644 --- a/test/test_ensemble_builder/test_ensemble.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -2,13 +2,9 @@ from typing import Callable -import os -import pickle import time from pathlib import Path -import dask.distributed -import numpy as np from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION @@ -18,16 +14,13 @@ Y_TEST, Y_VALID, EnsembleBuilder, - EnsembleBuilderManager, ) -from autosklearn.metrics import make_scorer, roc_auc +from autosklearn.metrics import roc_auc -from pytest_cases import fixture, parametrize, parametrize_with_cases -from unittest.mock import Mock, patch +from pytest_cases import fixture, parametrize +from unittest.mock import patch -import test.test_ensemble_builder.cases as cases from test.conftest import DEFAULT_SEED -from test.fixtures.logging import MockLogger @fixture diff --git a/test/test_ensemble_builder/cases.py b/test/test_ensemble_builder/test_ensemble_builder_real.py similarity index 70% rename from test/test_ensemble_builder/cases.py rename to test/test_ensemble_builder/test_ensemble_builder_real.py index 94ab708210..94ba79c7ff 100644 --- a/test/test_ensemble_builder/cases.py +++ b/test/test_ensemble_builder/test_ensemble_builder_real.py @@ -6,19 +6,18 @@ from autosklearn.automl import AutoML from autosklearn.automl_common.common.utils.backend import Backend -from pytest_cases import case, parametrize_with_cases +from pytest_cases import parametrize_with_cases import test.test_automl.cases as cases -@case(tags=["real"]) @parametrize_with_cases("automl", cases=cases, has_tag="fitted") -def case_fitted_automl( +def case_automl_cases( tmp_path: Path, automl: AutoML, make_backend: Callable[..., Backend], ) -> Backend: - """Gives the backend for from the cached automl instance + """Gives the backend for from the cached automl instance in `test_automl/cases.py` We do this by copying the backend produced from these cached automl runs to a new tmp directory for the ensemble builder tests to run from @@ -32,9 +31,16 @@ def case_fitted_automl( backend = make_backend(path=backend_path, template=original_backend) ensemble_dir = Path(backend.get_ensemble_dir()) - rmtree(ensemble_dir) + if ensemble_dir.exists(): + rmtree(ensemble_dir) ensemble_hist = Path(backend.internals_directory) / "ensemble_history.json" - ensemble_hist.unlink() + if ensemble_hist.exists(): + ensemble_hist.unlink() return backend + + +@parametrize_with_cases("ensemble_backend", cases=case_automl_cases) +def test_something(ensemble_backend: Backend) -> None: + return diff --git a/test/test_ensemble_builder/test_ensemble_manager.py 
b/test/test_ensemble_builder/test_ensemble_manager.py deleted file mode 100644 index e3b7296ba5..0000000000 --- a/test/test_ensemble_builder/test_ensemble_manager.py +++ /dev/null @@ -1,81 +0,0 @@ -from typing import Callable - -import time -from pathlib import Path - -import dask.distributed -import numpy as np - -from autosklearn.automl_common.common.utils.backend import Backend -from autosklearn.constants import BINARY_CLASSIFICATION -from autosklearn.ensemble_builder import EnsembleBuilderManager -from autosklearn.metrics import roc_auc - -from pytest_cases import parametrize_with_cases - -import test.test_ensemble_builder.cases as cases - - -@parametrize_with_cases("ensemble_backend", cases=cases) -def test_ensemble_builder_nbest_remembered( - ensemble_backend: Backend, - make_dask_client: Callable[..., [dask.distributed.Client]], -) -> None: - """ - Parameters - ---------- - ensemble_backend: Backend - The backend to use, relies on the 3 setup models - - Fixtures - -------- - make_dask_client: (...) -> Client - Make a dask client - - Expects - ------- - * The read_preds file should not be created - * The ensemble_nbest should be remembered and reduced between runs - TODO Note sure why there would be a reduction and how these numbers were made - - Last Note - --------- - "Makes sure ensemble builder returns the size of the ensemble that pynisher allowed - This way, we can remember it and not waste more time trying big ensemble sizes" - """ - dask_client = make_dask_client(n_workers=1) - - manager = EnsembleBuilderManager( - start_time=time.time(), - time_left_for_ensembles=1000, - backend=ensemble_backend, - dataset_name="Test", - task=BINARY_CLASSIFICATION, - metric=roc_auc, - ensemble_size=50, - ensemble_nbest=10, - max_models_on_disc=None, - seed=0, - precision=32, - read_at_most=np.inf, - ensemble_memory_limit=1000, - random_state=0, - max_iterations=None, - ) - - filepath = Path(ensemble_backend.internals_directory) / "ensemble_read_preds.pkl" - - manager.build_ensemble(dask_client, unit_test=True) - future = manager.futures[0] - dask.distributed.wait([future]) # wait for the ensemble process to finish - - assert future.result() == ([], 5, None, None, None) - - assert not filepath.exists() - - manager.build_ensemble(dask_client, unit_test=True) - future = manager.futures[0] - dask.distributed.wait([future]) # wait for the ensemble process to finish - - assert not filepath.exists() - assert future.result() == ([], 2, None, None, None) From 951bb2e6cf3f437c11f7afb2ca929705937a77a1 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 20:35:05 +0200 Subject: [PATCH 009/117] Formatting --- .../test_3_models/test_3_models.py | 7 +------ .../test_ensemble_builder.py | 17 ++++------------- 2 files changed, 5 insertions(+), 19 deletions(-) diff --git a/test/test_ensemble_builder/test_3_models/test_3_models.py b/test/test_ensemble_builder/test_3_models/test_3_models.py index 2940edfeab..143a3ef735 100644 --- a/test/test_ensemble_builder/test_3_models/test_3_models.py +++ b/test/test_ensemble_builder/test_3_models/test_3_models.py @@ -1,6 +1,5 @@ from __future__ import annotations - import os from pathlib import Path @@ -8,11 +7,7 @@ from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION -from autosklearn.ensemble_builder import ( - Y_TEST, - Y_VALID, - EnsembleBuilder, -) +from autosklearn.ensemble_builder import Y_TEST, Y_VALID, EnsembleBuilder from autosklearn.metrics import roc_auc from pytest_cases import 
parametrize, parametrize_with_cases diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 83fea24d22..7a10a37ff5 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -5,16 +5,10 @@ import time from pathlib import Path - from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.data.xy_data_manager import XYDataManager -from autosklearn.ensemble_builder import ( - Y_ENSEMBLE, - Y_TEST, - Y_VALID, - EnsembleBuilder, -) +from autosklearn.ensemble_builder import Y_ENSEMBLE, Y_TEST, Y_VALID, EnsembleBuilder from autosklearn.metrics import roc_auc from pytest_cases import fixture, parametrize @@ -51,7 +45,7 @@ def test_max_models_on_disc_with_float_selects_expected_models( mem_largest_mult: int, n_expected: int, largest_is_best: bool, - dummy_backend: Backend + dummy_backend: Backend, ) -> None: """ Parameters @@ -149,8 +143,7 @@ def test_max_models_on_disc_with_float_selects_expected_models( @parametrize("n_models", [50, 10, 2, 1]) def test_max_models_on_disc_float_always_preserves_best_model( - n_models: int, - dummy_backend: Backend + n_models: int, dummy_backend: Backend ) -> None: """ Parameters @@ -204,9 +197,7 @@ def test_max_models_on_disc_float_always_preserves_best_model( ((0.0, 4), (0.1, 4), (0.3, 3), (0.5, 2), (0.6, 2), (0.8, 1), (1.0, 1)), ) def test_performance_range_threshold( - performance_range_threshold: float, - expected_selected: int, - dummy_backend: Backend + performance_range_threshold: float, expected_selected: int, dummy_backend: Backend ) -> None: """ Parameters From 5abf258f45350295abce15707dffcc0bd98332a4 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 20:47:43 +0200 Subject: [PATCH 010/117] Remove `unit_test` arg --- autosklearn/ensemble_builder.py | 79 ++++++++++++++++----------------- 1 file changed, 39 insertions(+), 40 deletions(-) diff --git a/autosklearn/ensemble_builder.py b/autosklearn/ensemble_builder.py index e033a9b12b..de1c5f90f4 100644 --- a/autosklearn/ensemble_builder.py +++ b/autosklearn/ensemble_builder.py @@ -185,7 +185,6 @@ def __call__( def build_ensemble( self, dask_client: dask.distributed.Client, - unit_test: bool = False, ) -> None: # The second criteria is elapsed time @@ -261,7 +260,6 @@ def build_ensemble( priority=100, pynisher_context=self.pynisher_context, logger_port=self.logger_port, - unit_test=unit_test, ) ) @@ -299,7 +297,6 @@ def fit_and_return_ensemble( pynisher_context: str, max_models_on_disc: Optional[Union[float, int]] = 100, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, - unit_test: bool = False, memory_limit: Optional[int] = None, random_state: Optional[Union[int, np.random.RandomState]] = None, ) -> Tuple[ @@ -374,12 +371,6 @@ def fit_and_return_ensemble( logger_port: int = DEFAULT_TCP_LOGGING_PORT The port where the logging server is listening to. - unit_test: bool = False - Turn on unit testing mode. This currently makes fit_ensemble raise a - MemoryError. Having this is very bad coding style, but I did not find a way - to make unittest.mock work through the pynisher with all spawn contexts. - If you know a better solution, please let us know by opening an issue. - memory_limit: Optional[int] = None memory limit in mb. If ``None``, no memory limit is enforced. 
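# Illustrative aside: a minimal sketch of the `end_at` convention documented in
# fit_and_return_ensemble above. The caller passes an absolute deadline rather
# than a remaining time budget because Dask may queue the job for an unknown
# amount of time before it starts. All variable names below are illustrative only.
import time

time_left_for_ensembles = 1000.0               # budget granted by the caller, in seconds
start_time = time.time()                       # when the overall run began
end_at = start_time + time_left_for_ensembles  # absolute deadline handed to the worker

# Inside the worker, the remaining budget is recovered from the deadline, so any
# scheduling delay is charged against the ensemble-building budget rather than lost.
time_left = end_at - time.time()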
@@ -406,7 +397,6 @@ def fit_and_return_ensemble( read_at_most=read_at_most, random_state=random_state, logger_port=logger_port, - unit_test=unit_test, ).run( end_at=end_at, iteration=iteration, @@ -416,7 +406,7 @@ def fit_and_return_ensemble( return result -class EnsembleBuilder(object): +class EnsembleBuilder: def __init__( self, backend: Backend, @@ -432,62 +422,75 @@ def __init__( memory_limit: Optional[int] = 1024, read_at_most: int = 5, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, - random_state: Optional[Union[int, np.random.RandomState]] = None, - unit_test: bool = False, + random_state: int | np.random.RandomState | None = None, ): """ - Constructor - Parameters ---------- - backend: util.backend.Backend + backend: Backend backend to write and read files + dataset_name: str name of dataset + task_type: int type of ML task + metric: str name of metric to compute the loss of the given predictions + ensemble_size: int = 10 maximal size of ensemble (passed to autosklearn.ensemble.ensemble_selection) + ensemble_nbest: int | float = 100 - if int: consider only the n best prediction - if float: consider only this fraction of the best models - Both with respect to the validation predictions + + * int: consider only the n best prediction (> 0) + + * float: consider only this fraction of the best, between (0, 1) + + Both with respect to the validation predictions. If performance_range_threshold > 0, might return less models - max_models_on_disc: Optional[int | float] = 100 + + max_models_on_disc: int | float | None = 100 Defines the maximum number of models that are kept in the disc. - If int, it must be greater or equal than 1, and dictates the max number of - models to keep. - If float, it will be interpreted as the max megabytes allowed of disc space. - That is, if the number of ensemble candidates require more disc space than + It defines an upper bound on the models that can be used in the ensemble. + + * int: and dictates the max number of models to keep. (>= 1) + + * float: it will be interpreted as the max megabytes allowed of disc space. + If the number of ensemble candidates require more disc space than this float value, the worst models are deleted to keep within this budget. Models and predictions of the worst-performing models will be deleted then. - If None, the feature is disabled. - It defines an upper bound on the models that can be used in the ensemble. + + * None: the feature is disabled. + performance_range_threshold: float = 0 - Keep only models that are better than: - dummy + (best - dummy)*performance_range_threshold - E.g dummy=2, best=4, thresh=0.5 --> only consider models with loss > 3 Will at most return the minimum between ensemble_nbest models, and max_models_on_disc. Might return less + + Keep only models that are better than: + + dummy + (best - dummy) * performance_range_threshold + + E.g dummy=2, best=4, thresh=0.5 --> only consider models with loss > 3 + seed: int = 1 random seed that is used as part of the filename - precision: int in [16,32,64,128] = 32 + + precision: int [16 | 32 | 64 | 128] = 32 precision of floats to read the predictions + memory_limit: Optional[int] = 1024 memory limit in mb. If ``None``, no memory limit is enforced. 
+ read_at_most: int = 5 read at most n new prediction files in each iteration + logger_port: int = DEFAULT_TCP_LOGGING_PORT port that receives logging records - random_state: Optional[int | RandomState] = None + + random_state: int | RandomState | None = None An int or RandomState object used for generating the ensemble. - unit_test: bool = False - Turn on unit testing mode. This currently makes fit_ensemble raise - a MemoryError. Having this is very bad coding style, but I did not find a - way to make unittest.mock work through the pynisher with all spawn contexts. - If you know a better solution, please let us know by opening an issue. """ super(EnsembleBuilder, self).__init__() @@ -525,7 +528,6 @@ def __init__( self.memory_limit = memory_limit self.read_at_most = read_at_most self.random_state = random_state - self.unit_test = unit_test # Setup the logger self.logger_port = logger_port @@ -1370,9 +1372,6 @@ def fit_ensemble(self, selected_keys: list): ensemble: EnsembleSelection trained Ensemble """ - if self.unit_test: - raise MemoryError() - predictions_train = [self.read_preds[k][Y_ENSEMBLE] for k in selected_keys] include_num_runs = [ ( From 3e8ed929712403677cc9c9d48fbacc3867b9d255 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 20:54:49 +0200 Subject: [PATCH 011/117] Remove SAVE2DISC --- autosklearn/ensemble_builder.py | 53 +++++++++++---------------------- 1 file changed, 18 insertions(+), 35 deletions(-) diff --git a/autosklearn/ensemble_builder.py b/autosklearn/ensemble_builder.py index de1c5f90f4..b677be2835 100644 --- a/autosklearn/ensemble_builder.py +++ b/autosklearn/ensemble_builder.py @@ -414,12 +414,12 @@ def __init__( task_type: int, metric: Scorer, ensemble_size: int = 10, - ensemble_nbest: Union[int, float] = 100, - max_models_on_disc: Optional[int | float] = 100, + ensemble_nbest: int | float = 100, + max_models_on_disc: int | float | None = 100, performance_range_threshold: float = 0, seed: int = 1, precision: int = 32, - memory_limit: Optional[int] = 1024, + memory_limit: int | None = 1024, read_at_most: int = 5, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, random_state: int | np.random.RandomState | None = None, @@ -492,62 +492,45 @@ def __init__( random_state: int | RandomState | None = None An int or RandomState object used for generating the ensemble. 
""" - super(EnsembleBuilder, self).__init__() + if isinstance(ensemble_nbest, int) and ensemble_nbest < 1: + raise ValueError(f"int ensemble_nbest ({ensemble_nbest}) must be (>1)") - self.backend = backend # communication with filesystem + if isinstance(ensemble_nbest, float) and not (0 <= ensemble_nbest <= 1): + raise ValueError(f"float ensemble_nbest ({ensemble_nbest}) not in (0,1)") + + if max_models_on_disc is not None and max_models_on_disc < 0: + raise ValueError("max_models_on_disc must be positive or None") + + self.backend = backend self.dataset_name = dataset_name self.task_type = task_type self.metric = metric self.ensemble_size = ensemble_size self.performance_range_threshold = performance_range_threshold - - if isinstance(ensemble_nbest, numbers.Integral) and ensemble_nbest < 1: - raise ValueError( - "Integer ensemble_nbest has to be larger 1: %s" % ensemble_nbest - ) - elif not isinstance(ensemble_nbest, numbers.Integral): - if ensemble_nbest < 0 or ensemble_nbest > 1: - raise ValueError( - "Float ensemble_nbest best has to be >= 0 and <= 1: %s" - % ensemble_nbest - ) - self.ensemble_nbest = ensemble_nbest - - # max_models_on_disc can be a float, in such case we need to - # remember the user specified Megabytes and translate this to - # max number of ensemble models. max_resident_models keeps the - # maximum number of models in disc - if max_models_on_disc is not None and max_models_on_disc < 0: - raise ValueError("max_models_on_disc has to be a positive number or None") self.max_models_on_disc = max_models_on_disc - self.max_resident_models = None - self.seed = seed self.precision = precision self.memory_limit = memory_limit self.read_at_most = read_at_most self.random_state = random_state + # max_resident_models keeps the maximum number of models in disc + self.max_resident_models: int | None = None + # Setup the logger + self.logger = get_named_client_logger(name="EnsembleBuilder", port=logger_port) self.logger_port = logger_port - self.logger = get_named_client_logger( - name="EnsembleBuilder", - port=self.logger_port, - ) if ensemble_nbest == 1: self.logger.debug( - "Behaviour depends on int/float: %s, %s (ensemble_nbest, type)" - % (ensemble_nbest, type(ensemble_nbest)) + f"Behaviour dep. on int/float: {ensemble_nbest}:{type(ensemble_nbest)}" ) self.start_time = 0 self.model_fn_re = re.compile(MODEL_FN_RE) - self.last_hash = None # hash of ensemble training data self.y_true_ensemble = None - self.SAVE2DISC = True # already read prediction files # {"file name": { @@ -825,7 +808,7 @@ def main(self, time_left, iteration, return_predictions): ensemble = self.fit_ensemble(selected_keys=candidate_models) # Save the ensemble for later use in the main auto-sklearn module! 
- if ensemble is not None and self.SAVE2DISC: + if ensemble is not None: self.backend.save_ensemble(ensemble, iteration, self.seed) # Delete files of non-candidate models - can only be done after fitting the From 5dd9832adc9eca846bbb56a8f7303d078d2f92e2 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 21:02:43 +0200 Subject: [PATCH 012/117] Split builder and manager into seperate files --- autosklearn/automl.py | 2 +- autosklearn/ensemble_building/__init__.py | 4 + .../builder.py} | 371 +---------------- autosklearn/ensemble_building/manager.py | 383 ++++++++++++++++++ autosklearn/smbo.py | 2 +- pyproject.toml | 3 +- .../test_3_models/test_3_models.py | 3 +- .../test_ensemble_builder.py | 7 +- test/test_estimators/test_estimators.py | 2 +- 9 files changed, 400 insertions(+), 377 deletions(-) create mode 100644 autosklearn/ensemble_building/__init__.py rename autosklearn/{ensemble_builder.py => ensemble_building/builder.py} (78%) create mode 100644 autosklearn/ensemble_building/manager.py diff --git a/autosklearn/automl.py b/autosklearn/automl.py index 3c6caecc88..c400284849 100644 --- a/autosklearn/automl.py +++ b/autosklearn/automl.py @@ -61,7 +61,7 @@ convert_if_sparse, ) from autosklearn.data.xy_data_manager import XYDataManager -from autosklearn.ensemble_builder import EnsembleBuilderManager +from autosklearn.ensemble_building import EnsembleBuilderManager from autosklearn.ensembles.singlebest_ensemble import SingleBest from autosklearn.evaluation import ExecuteTaFuncWithQueue, get_cost_of_crash from autosklearn.evaluation.abstract_evaluator import _fit_and_suppress_warnings diff --git a/autosklearn/ensemble_building/__init__.py b/autosklearn/ensemble_building/__init__.py new file mode 100644 index 0000000000..95ba64e83e --- /dev/null +++ b/autosklearn/ensemble_building/__init__.py @@ -0,0 +1,4 @@ +from autosklearn.ensemble_building.builder import EnsembleBuilder +from autosklearn.ensemble_building.manager import EnsembleBuilderManager + +__all__ = ["EnsembleBuilder", "EnsembleBuilderManager"] diff --git a/autosklearn/ensemble_builder.py b/autosklearn/ensemble_building/builder.py similarity index 78% rename from autosklearn/ensemble_builder.py rename to autosklearn/ensemble_building/builder.py index b677be2835..63b8f76b50 100644 --- a/autosklearn/ensemble_builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import List, Optional, Tuple, Union +from typing import List, Optional, Tuple import glob import gzip @@ -16,14 +16,9 @@ import traceback import zlib -import dask.distributed import numpy as np import pandas as pd import pynisher -from smac.callbacks import IncorporateRunResultCallback -from smac.optimizer.smbo import SMBO -from smac.runhistory.runhistory import RunInfo, RunValue -from smac.tae.base import StatusType from autosklearn.automl_common.common.ensemble_building.abstract_ensemble import ( # noqa: E501 AbstractEnsemble, @@ -42,370 +37,6 @@ MODEL_FN_RE = r"_([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*)\.npy" -class EnsembleBuilderManager(IncorporateRunResultCallback): - def __init__( - self, - start_time: float, - time_left_for_ensembles: float, - backend: Backend, - dataset_name: str, - task: int, - metric: Scorer, - ensemble_size: int, - ensemble_nbest: int, - seed: int, - precision: int, - max_iterations: Optional[int], - read_at_most: int, - ensemble_memory_limit: Optional[int], - random_state: Union[int, np.random.RandomState], - max_models_on_disc: Optional[float | int] = 100, - 
logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, - pynisher_context: str = "fork", - ): - """SMAC callback to handle ensemble building - - Parameters - ---------- - start_time: int - the time when this job was started, to account for any latency in job - allocation. - - time_left_for_ensemble: int - How much time is left for the task. Job should finish within this - allocated time - - backend: util.backend.Backend - backend to write and read files - - dataset_name: str - name of dataset - - task_type: int - type of ML task - - metric: str - name of metric to compute the loss of the given predictions - - ensemble_size: int - maximal size of ensemble - - ensemble_nbest: int/float - if int: consider only the n best prediction - if float: consider only this fraction of the best models - Both wrt to validation predictions - If performance_range_threshold > 0, might return less models - - max_models_on_disc: Optional[int | float] = 100 - Defines the maximum number of models that are kept in the disc. - - If int, it must be greater or equal than 1, and dictates the max - number of models to keep. - - If float, it will be interpreted as the max megabytes allowed of - disc space. That is, if the number of ensemble candidates require more - disc space than this float value, the worst models will be deleted to - keep within this budget. Models and predictions of the worst-performing - models will be deleted then. - - If None, the feature is disabled. It defines an upper bound on the - models that can be used in the ensemble. - - seed: int - random seed - - max_iterations: int - maximal number of iterations to run this script - (default None --> deactivated) - - precision: [16,32,64,128] - precision of floats to read the predictions - - ensemble_memory_limit: Optional[int] - memory limit in mb. If ``None``, no memory limit is enforced. - - read_at_most: int - read at most n new prediction files in each iteration - - logger_port: int - port that receives logging records - - pynisher_context: str - The multiprocessing context for pynisher. One of spawn/fork/forkserver. - - """ - self.start_time = start_time - self.time_left_for_ensembles = time_left_for_ensembles - self.backend = backend - self.dataset_name = dataset_name - self.task = task - self.metric = metric - self.ensemble_size = ensemble_size - self.ensemble_nbest = ensemble_nbest - self.max_models_on_disc = max_models_on_disc - self.seed = seed - self.precision = precision - self.max_iterations = max_iterations - self.read_at_most = read_at_most - self.ensemble_memory_limit = ensemble_memory_limit - self.random_state = random_state - self.logger_port = logger_port - self.pynisher_context = pynisher_context - - # Store something similar to SMAC's runhistory - self.history = [] - - # We only submit new ensembles when there is not an active ensemble job - self.futures = [] - - # The last criteria is the number of iterations - self.iteration = 0 - - # Keep track of when we started to know when we need to finish! 
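# Illustrative aside: a toy sketch of the scheduling rule described in the
# manager docstring above, where at most one ensemble-building job is in flight
# and a new job is only submitted once the previous future has finished.
# `OneJobAtATime` and `submit` are stand-ins for illustration; neither is
# auto-sklearn nor Dask API.
class OneJobAtATime:
    def __init__(self) -> None:
        self.futures: list = []
        self.iteration = 0

    def maybe_submit(self, submit) -> None:
        # A job is still running: do nothing this round.
        if self.futures and not self.futures[0].done():
            return
        # Collect the result of a finished job before scheduling the next one.
        if self.futures:
            self.futures.pop().result()
        self.futures.append(submit(self.iteration))
        self.iteration += 1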
- self.start_time = time.time() - - def __call__( - self, - smbo: "SMBO", - run_info: RunInfo, - result: RunValue, - time_left: float, - ): - """ - Returns - ------- - List[Tuple[int, float, float, float]]: - A list with the performance history of this ensemble, of the form - [(pandas_timestamp, train_performance, val_performance, test_performance)] - """ - if result.status in (StatusType.STOP, StatusType.ABORT) or smbo._stop: - return - self.build_ensemble(smbo.tae_runner.client) - - def build_ensemble( - self, - dask_client: dask.distributed.Client, - ) -> None: - - # The second criteria is elapsed time - elapsed_time = time.time() - self.start_time - - logger = get_named_client_logger( - name="EnsembleBuilder", - port=self.logger_port, - ) - - # First test for termination conditions - if self.time_left_for_ensembles < elapsed_time: - logger.info( - "Terminate ensemble building as not time is left (run for {}s)".format( - elapsed_time - ), - ) - return - if self.max_iterations is not None and self.max_iterations <= self.iteration: - logger.info( - "Terminate ensemble building because of max iterations:" - f" {self.max_iterations} of {self.iteration}" - ) - return - - if len(self.futures) != 0: - if self.futures[0].done(): - result = self.futures.pop().result() - if result: - ensemble_history, self.ensemble_nbest, _, _, _ = result - logger.debug( - "iteration={} @ elapsed_time={} has history={}".format( - self.iteration, - elapsed_time, - ensemble_history, - ) - ) - self.history.extend(ensemble_history) - - # Only submit new jobs if the previous ensemble job finished - if len(self.futures) == 0: - - # Add the result of the run - # On the next while iteration, no references to - # ensemble builder object, so it should be garbage collected to - # save memory while waiting for resources - # Also, notice how ensemble nbest is returned, so we don't waste - # iterations testing if the deterministic predictions size can - # be fitted in memory - try: - # Submit a Dask job from this job, to properly - # see it in the dask diagnostic dashboard - # Notice that the forked ensemble_builder_process will - # wait for the below function to be done - self.futures.append( - dask_client.submit( - fit_and_return_ensemble, - backend=self.backend, - dataset_name=self.dataset_name, - task_type=self.task, - metric=self.metric, - ensemble_size=self.ensemble_size, - ensemble_nbest=self.ensemble_nbest, - max_models_on_disc=self.max_models_on_disc, - seed=self.seed, - precision=self.precision, - memory_limit=self.ensemble_memory_limit, - read_at_most=self.read_at_most, - random_state=self.random_state, - end_at=self.start_time + self.time_left_for_ensembles, - iteration=self.iteration, - return_predictions=False, - priority=100, - pynisher_context=self.pynisher_context, - logger_port=self.logger_port, - ) - ) - - logger.info( - "{}/{} Started Ensemble builder job at {} for iteration {}.".format( - # Log the client to make sure we - # remain connected to the scheduler - self.futures[0], - dask_client, - time.strftime("%Y.%m.%d-%H.%M.%S"), - self.iteration, - ), - ) - self.iteration += 1 - except Exception as e: - exception_traceback = traceback.format_exc() - error_message = repr(e) - logger.critical(exception_traceback) - logger.critical(error_message) - - -def fit_and_return_ensemble( - backend: Backend, - dataset_name: str, - task_type: str, - metric: Scorer, - ensemble_size: int, - ensemble_nbest: int, - seed: int, - precision: int, - read_at_most: int, - end_at: float, - iteration: int, - return_predictions: 
bool, - pynisher_context: str, - max_models_on_disc: Optional[Union[float, int]] = 100, - logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, - memory_limit: Optional[int] = None, - random_state: Optional[Union[int, np.random.RandomState]] = None, -) -> Tuple[ - List[Tuple[int, float, float, float]], - int, - Optional[np.ndarray], - Optional[np.ndarray], - Optional[np.ndarray], -]: - """ - - A short function to fit and create an ensemble. It is just a wrapper to easily send - a request to dask to create an ensemble and clean the memory when finished - - Parameters - ---------- - backend: util.backend.Backend - backend to write and read files - - dataset_name: str - name of dataset - - metric: str - name of metric to compute the loss of the given predictions - - task_type: int - type of ML task - - ensemble_size: int - maximal size of ensemble (passed to autosklearn.ensemble.ensemble_selection) - - ensemble_nbest: int/float - if int: consider only the n best prediction - if float: consider only this fraction of the best models - Both wrt to validation predictions - If performance_range_threshold > 0, might return less models - - max_models_on_disc: Optional[int | float] = 100 - Defines the maximum number of models that are kept in the disc. - - If int, it must be greater or equal than 1, and dictates the max number of - models to keep. - - If float, it will be interpreted as the max megabytes allowed of disc space. - That is, if the number of ensemble candidates require more disc space than - this float value, the worst models will be deleted to keep within this - budget. Models and predictions of the worst-performing models will be - deleted then. - - If None, the feature is disabled. - It defines an upper bound on the models that can be used in the ensemble. - - seed: int - random seed - - precision: [16,32,64,128] - precision of floats to read the predictions - - read_at_most: int - read at most n new prediction files in each iteration - - end_at: float - At what time the job must finish. Needs to be the endtime and not the - time left because we do not know when dask schedules the job. - - iteration: int - The current iteration - - pynisher_context: str - Context to use for multiprocessing, can be either fork, spawn or forkserver. - - logger_port: int = DEFAULT_TCP_LOGGING_PORT - The port where the logging server is listening to. - - memory_limit: Optional[int] = None - memory limit in mb. If ``None``, no memory limit is enforced. - - random_state: Optional[int | RandomState] = None - A random state used for the ensemble selection process. 
- - Returns - ------- - List[Tuple[int, float, float, float]] - A list with the performance history of this ensemble, of the form - [(pandas_timestamp, train_performance, val_performance, test_performance)] - """ - result = EnsembleBuilder( - backend=backend, - dataset_name=dataset_name, - task_type=task_type, - metric=metric, - ensemble_size=ensemble_size, - ensemble_nbest=ensemble_nbest, - max_models_on_disc=max_models_on_disc, - seed=seed, - precision=precision, - memory_limit=memory_limit, - read_at_most=read_at_most, - random_state=random_state, - logger_port=logger_port, - ).run( - end_at=end_at, - iteration=iteration, - return_predictions=return_predictions, - pynisher_context=pynisher_context, - ) - return result - - class EnsembleBuilder: def __init__( self, diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py new file mode 100644 index 0000000000..c443cc242a --- /dev/null +++ b/autosklearn/ensemble_building/manager.py @@ -0,0 +1,383 @@ +from __future__ import annotations + +from typing import List, Optional, Tuple, Union + +import logging.handlers +import time +import traceback + +import dask.distributed +import numpy as np +from smac.callbacks import IncorporateRunResultCallback +from smac.optimizer.smbo import SMBO +from smac.runhistory.runhistory import RunInfo, RunValue +from smac.tae.base import StatusType + +from autosklearn.metrics import Scorer +from autosklearn.util.logging_ import get_named_client_logger +from autosklearn.ensemble_building.builder import EnsembleBuilder +from autosklearn.automl_common.common.utils.backend import Backend + + +class EnsembleBuilderManager(IncorporateRunResultCallback): + def __init__( + self, + start_time: float, + time_left_for_ensembles: float, + backend: Backend, + dataset_name: str, + task: int, + metric: Scorer, + ensemble_size: int, + ensemble_nbest: int, + seed: int, + precision: int, + max_iterations: Optional[int], + read_at_most: int, + ensemble_memory_limit: Optional[int], + random_state: Union[int, np.random.RandomState], + max_models_on_disc: Optional[float | int] = 100, + logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, + pynisher_context: str = "fork", + ): + """SMAC callback to handle ensemble building + + Parameters + ---------- + start_time: int + the time when this job was started, to account for any latency in job + allocation. + + time_left_for_ensemble: int + How much time is left for the task. Job should finish within this + allocated time + + backend: util.backend.Backend + backend to write and read files + + dataset_name: str + name of dataset + + task_type: int + type of ML task + + metric: str + name of metric to compute the loss of the given predictions + + ensemble_size: int + maximal size of ensemble + + ensemble_nbest: int/float + if int: consider only the n best prediction + if float: consider only this fraction of the best models + Both wrt to validation predictions + If performance_range_threshold > 0, might return less models + + max_models_on_disc: Optional[int | float] = 100 + Defines the maximum number of models that are kept in the disc. + + If int, it must be greater or equal than 1, and dictates the max + number of models to keep. + + If float, it will be interpreted as the max megabytes allowed of + disc space. That is, if the number of ensemble candidates require more + disc space than this float value, the worst models will be deleted to + keep within this budget. 
Models and predictions of the worst-performing + models will be deleted then. + + If None, the feature is disabled. It defines an upper bound on the + models that can be used in the ensemble. + + seed: int + random seed + + max_iterations: int + maximal number of iterations to run this script + (default None --> deactivated) + + precision: [16,32,64,128] + precision of floats to read the predictions + + ensemble_memory_limit: Optional[int] + memory limit in mb. If ``None``, no memory limit is enforced. + + read_at_most: int + read at most n new prediction files in each iteration + + logger_port: int + port that receives logging records + + pynisher_context: str + The multiprocessing context for pynisher. One of spawn/fork/forkserver. + + """ + self.start_time = start_time + self.time_left_for_ensembles = time_left_for_ensembles + self.backend = backend + self.dataset_name = dataset_name + self.task = task + self.metric = metric + self.ensemble_size = ensemble_size + self.ensemble_nbest = ensemble_nbest + self.max_models_on_disc = max_models_on_disc + self.seed = seed + self.precision = precision + self.max_iterations = max_iterations + self.read_at_most = read_at_most + self.ensemble_memory_limit = ensemble_memory_limit + self.random_state = random_state + self.logger_port = logger_port + self.pynisher_context = pynisher_context + + # Store something similar to SMAC's runhistory + self.history = [] + + # We only submit new ensembles when there is not an active ensemble job + self.futures = [] + + # The last criteria is the number of iterations + self.iteration = 0 + + # Keep track of when we started to know when we need to finish! + self.start_time = time.time() + + def __call__( + self, + smbo: "SMBO", + run_info: RunInfo, + result: RunValue, + time_left: float, + ): + """ + Returns + ------- + List[Tuple[int, float, float, float]]: + A list with the performance history of this ensemble, of the form + [(pandas_timestamp, train_performance, val_performance, test_performance)] + """ + if result.status in (StatusType.STOP, StatusType.ABORT) or smbo._stop: + return + self.build_ensemble(smbo.tae_runner.client) + + def build_ensemble( + self, + dask_client: dask.distributed.Client, + ) -> None: + + # The second criteria is elapsed time + elapsed_time = time.time() - self.start_time + + logger = get_named_client_logger( + name="EnsembleBuilder", + port=self.logger_port, + ) + + # First test for termination conditions + if self.time_left_for_ensembles < elapsed_time: + logger.info( + "Terminate ensemble building as not time is left (run for {}s)".format( + elapsed_time + ), + ) + return + if self.max_iterations is not None and self.max_iterations <= self.iteration: + logger.info( + "Terminate ensemble building because of max iterations:" + f" {self.max_iterations} of {self.iteration}" + ) + return + + if len(self.futures) != 0: + if self.futures[0].done(): + result = self.futures.pop().result() + if result: + ensemble_history, self.ensemble_nbest, _, _, _ = result + logger.debug( + "iteration={} @ elapsed_time={} has history={}".format( + self.iteration, + elapsed_time, + ensemble_history, + ) + ) + self.history.extend(ensemble_history) + + # Only submit new jobs if the previous ensemble job finished + if len(self.futures) == 0: + + # Add the result of the run + # On the next while iteration, no references to + # ensemble builder object, so it should be garbage collected to + # save memory while waiting for resources + # Also, notice how ensemble nbest is returned, so we don't waste + # 
iterations testing if the deterministic predictions size can + # be fitted in memory + try: + # Submit a Dask job from this job, to properly + # see it in the dask diagnostic dashboard + # Notice that the forked ensemble_builder_process will + # wait for the below function to be done + self.futures.append( + dask_client.submit( + fit_and_return_ensemble, + backend=self.backend, + dataset_name=self.dataset_name, + task_type=self.task, + metric=self.metric, + ensemble_size=self.ensemble_size, + ensemble_nbest=self.ensemble_nbest, + max_models_on_disc=self.max_models_on_disc, + seed=self.seed, + precision=self.precision, + memory_limit=self.ensemble_memory_limit, + read_at_most=self.read_at_most, + random_state=self.random_state, + end_at=self.start_time + self.time_left_for_ensembles, + iteration=self.iteration, + return_predictions=False, + priority=100, + pynisher_context=self.pynisher_context, + logger_port=self.logger_port, + ) + ) + + logger.info( + "{}/{} Started Ensemble builder job at {} for iteration {}.".format( + # Log the client to make sure we + # remain connected to the scheduler + self.futures[0], + dask_client, + time.strftime("%Y.%m.%d-%H.%M.%S"), + self.iteration, + ), + ) + self.iteration += 1 + except Exception as e: + exception_traceback = traceback.format_exc() + error_message = repr(e) + logger.critical(exception_traceback) + logger.critical(error_message) + + +def fit_and_return_ensemble( + backend: Backend, + dataset_name: str, + task_type: str, + metric: Scorer, + ensemble_size: int, + ensemble_nbest: int, + seed: int, + precision: int, + read_at_most: int, + end_at: float, + iteration: int, + return_predictions: bool, + pynisher_context: str, + max_models_on_disc: Optional[Union[float, int]] = 100, + logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, + memory_limit: Optional[int] = None, + random_state: Optional[Union[int, np.random.RandomState]] = None, +) -> Tuple[ + List[Tuple[int, float, float, float]], + int, + Optional[np.ndarray], + Optional[np.ndarray], + Optional[np.ndarray], +]: + """ + + A short function to fit and create an ensemble. It is just a wrapper to easily send + a request to dask to create an ensemble and clean the memory when finished + + Parameters + ---------- + backend: util.backend.Backend + backend to write and read files + + dataset_name: str + name of dataset + + metric: str + name of metric to compute the loss of the given predictions + + task_type: int + type of ML task + + ensemble_size: int + maximal size of ensemble (passed to autosklearn.ensemble.ensemble_selection) + + ensemble_nbest: int/float + if int: consider only the n best prediction + if float: consider only this fraction of the best models + Both wrt to validation predictions + If performance_range_threshold > 0, might return less models + + max_models_on_disc: Optional[int | float] = 100 + Defines the maximum number of models that are kept in the disc. + + If int, it must be greater or equal than 1, and dictates the max number of + models to keep. + + If float, it will be interpreted as the max megabytes allowed of disc space. + That is, if the number of ensemble candidates require more disc space than + this float value, the worst models will be deleted to keep within this + budget. Models and predictions of the worst-performing models will be + deleted then. + + If None, the feature is disabled. + It defines an upper bound on the models that can be used in the ensemble. 
+ + seed: int + random seed + + precision: [16,32,64,128] + precision of floats to read the predictions + + read_at_most: int + read at most n new prediction files in each iteration + + end_at: float + At what time the job must finish. Needs to be the endtime and not the + time left because we do not know when dask schedules the job. + + iteration: int + The current iteration + + pynisher_context: str + Context to use for multiprocessing, can be either fork, spawn or forkserver. + + logger_port: int = DEFAULT_TCP_LOGGING_PORT + The port where the logging server is listening to. + + memory_limit: Optional[int] = None + memory limit in mb. If ``None``, no memory limit is enforced. + + random_state: Optional[int | RandomState] = None + A random state used for the ensemble selection process. + + Returns + ------- + List[Tuple[int, float, float, float]] + A list with the performance history of this ensemble, of the form + [(pandas_timestamp, train_performance, val_performance, test_performance)] + """ + result = EnsembleBuilder( + backend=backend, + dataset_name=dataset_name, + task_type=task_type, + metric=metric, + ensemble_size=ensemble_size, + ensemble_nbest=ensemble_nbest, + max_models_on_disc=max_models_on_disc, + seed=seed, + precision=precision, + memory_limit=memory_limit, + read_at_most=read_at_most, + random_state=random_state, + logger_port=logger_port, + ).run( + end_at=end_at, + iteration=iteration, + return_predictions=return_predictions, + pynisher_context=pynisher_context, + ) + return result diff --git a/autosklearn/smbo.py b/autosklearn/smbo.py index 7a74490081..293b615b12 100644 --- a/autosklearn/smbo.py +++ b/autosklearn/smbo.py @@ -32,7 +32,7 @@ TASK_TYPES_TO_STRING, ) from autosklearn.data.abstract_data_manager import AbstractDataManager -from autosklearn.ensemble_builder import EnsembleBuilderManager +from autosklearn.ensemble_building import EnsembleBuilderManager from autosklearn.evaluation import ExecuteTaFuncWithQueue, get_cost_of_crash from autosklearn.metalearning.metafeatures.metafeatures import ( calculate_all_metafeatures_encoded_labels, diff --git a/pyproject.toml b/pyproject.toml index 3e3aafb1f6..44cb62cd3b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,7 +95,8 @@ module = [ "autosklearn.automl", "autosklearn.smbo", "autosklearn.experimental.askl2", - "autosklearn.ensemble_builder", + "autosklearn.ensemble_building.builder", + "autosklearn.ensemble_building.manager", "autosklearn.ensembles.singlebest_ensemble", "autosklearn.ensembles.ensemble_selection", "autosklearn.evaluation", #__init__ diff --git a/test/test_ensemble_builder/test_3_models/test_3_models.py b/test/test_ensemble_builder/test_3_models/test_3_models.py index 143a3ef735..73a9462093 100644 --- a/test/test_ensemble_builder/test_3_models/test_3_models.py +++ b/test/test_ensemble_builder/test_3_models/test_3_models.py @@ -7,7 +7,7 @@ from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION -from autosklearn.ensemble_builder import Y_TEST, Y_VALID, EnsembleBuilder +from autosklearn.ensemble_building.builder import Y_TEST, Y_VALID, EnsembleBuilder from autosklearn.metrics import roc_auc from pytest_cases import parametrize, parametrize_with_cases @@ -275,7 +275,6 @@ def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: seed=DEFAULT_SEED, # important to find the test files ensemble_nbest=2, ) - ensbuilder.SAVE2DISC = False ensbuilder.compute_loss_per_model() diff --git 
a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 7a10a37ff5..c621a18262 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -8,7 +8,12 @@ from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.data.xy_data_manager import XYDataManager -from autosklearn.ensemble_builder import Y_ENSEMBLE, Y_TEST, Y_VALID, EnsembleBuilder +from autosklearn.ensemble_building.builder import ( + Y_ENSEMBLE, + Y_TEST, + Y_VALID, + EnsembleBuilder, +) from autosklearn.metrics import roc_auc from pytest_cases import fixture, parametrize diff --git a/test/test_estimators/test_estimators.py b/test/test_estimators/test_estimators.py index cd4b0922de..0962179f34 100644 --- a/test/test_estimators/test_estimators.py +++ b/test/test_estimators/test_estimators.py @@ -28,7 +28,7 @@ import autosklearn.pipeline.util as putil from autosklearn.automl import AutoMLClassifier from autosklearn.data.validation import InputValidator -from autosklearn.ensemble_builder import MODEL_FN_RE +from autosklearn.ensemble_building.builder import MODEL_FN_RE from autosklearn.estimators import ( AutoSklearnClassifier, AutoSklearnEstimator, From c0ebad5d7c711f5c4d948eafce2ab2f6b183e235 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 21:34:34 +0200 Subject: [PATCH 013/117] Tidy up init of EnsembleBuilder --- autosklearn/ensemble_building/builder.py | 190 ++++++++++++----------- 1 file changed, 97 insertions(+), 93 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 63b8f76b50..20dd4d70a0 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -1,7 +1,8 @@ from __future__ import annotations -from typing import List, Optional, Tuple +from typing import Any, List, Optional, Tuple +from pathlib import Path import glob import gzip import logging.handlers @@ -38,6 +39,9 @@ class EnsembleBuilder: + + model_fn_re = re.compile(MODEL_FN_RE) + def __init__( self, backend: Backend, @@ -132,106 +136,113 @@ def __init__( if max_models_on_disc is not None and max_models_on_disc < 0: raise ValueError("max_models_on_disc must be positive or None") - self.backend = backend - self.dataset_name = dataset_name - self.task_type = task_type - self.metric = metric - self.ensemble_size = ensemble_size - self.performance_range_threshold = performance_range_threshold - self.ensemble_nbest = ensemble_nbest - self.max_models_on_disc = max_models_on_disc self.seed = seed + self.metric = metric + self.backend = backend self.precision = precision + self.task_type = task_type self.memory_limit = memory_limit self.read_at_most = read_at_most self.random_state = random_state + self.dataset_name = dataset_name + self.ensemble_size = ensemble_size + self.ensemble_nbest = ensemble_nbest + self.max_models_on_disc = max_models_on_disc + self.performance_range_threshold = performance_range_threshold # max_resident_models keeps the maximum number of models in disc self.max_resident_models: int | None = None - # Setup the logger - self.logger = get_named_client_logger(name="EnsembleBuilder", port=logger_port) - self.logger_port = logger_port + # The starting time of the procedure + self.start_time = 0 - if ensemble_nbest == 1: - self.logger.debug( - f"Behaviour dep. 
on int/float: {ensemble_nbest}:{type(ensemble_nbest)}" - ) + # Hash of the last ensemble training data to identify it + self.last_hash = None - self.start_time = 0 - self.model_fn_re = re.compile(MODEL_FN_RE) - self.last_hash = None # hash of ensemble training data - self.y_true_ensemble = None - - # already read prediction files - # {"file name": { - # "ens_loss": float - # "mtime_ens": str, - # "mtime_valid": str, - # "mtime_test": str, - # "seed": int, - # "num_run": int, - # }} - self.read_losses = {} - # {"file_name": { - # Y_ENSEMBLE: np.ndarray - # Y_VALID: np.ndarray - # Y_TEST: np.ndarray - # } - # } - self.read_preds = {} - - # Depending on the dataset dimensions, - # regenerating every iteration, the predictions - # losses for self.read_preds - # is too computationally expensive - # As the ensemble builder is stateless - # (every time the ensemble builder gets resources - # from dask, it builds this object from scratch) - # we save the state of this dictionary to memory - # and read it if available - self.ensemble_memory_file = os.path.join( - self.backend.internals_directory, "ensemble_read_preds.pkl" - ) - if os.path.exists(self.ensemble_memory_file): - try: - with (open(self.ensemble_memory_file, "rb")) as memory: - self.read_preds, self.last_hash = pickle.load(memory) - except Exception as e: - self.logger.warning( - "Could not load the previous iterations of ensemble_builder" - " predictions. This might impact the quality of the run." - f" Exception={e} {traceback.format_exc()}" - ) - self.ensemble_loss_file = os.path.join( - self.backend.internals_directory, "ensemble_read_losses.pkl" - ) - if os.path.exists(self.ensemble_loss_file): - try: - with (open(self.ensemble_loss_file, "rb")) as memory: - self.read_losses = pickle.load(memory) - except Exception as e: - self.logger.warning( - "Could not load the previous iterations of ensemble_builder losses." - "This might impact the quality of the run. Exception={} {}".format( - e, - traceback.format_exc(), - ) - ) + # The cached values of the true targets for the ensemble + self.y_true_ensemble: int | None = None - # hidden feature which can be activated via an environment variable. - # This keeps all models and predictions which have ever been a candidate. - # This is necessary to post-hoc compute the whole ensemble building trajectory. 
- self._has_been_candidate = set() + # Track the ensemble performance + self.ensemble_history = [] + + # Setup the logger + self.logger = get_named_client_logger(name="EnsembleBuilder", port=logger_port) + self.logger_port = logger_port + # Keep running knowledge of its validation performance self.validation_performance_ = np.inf - # Track the ensemble performance + # Data we may need datamanager = self.backend.load_datamanager() self.y_valid = datamanager.data.get("Y_valid") self.y_test = datamanager.data.get("Y_test") - del datamanager - self.ensemble_history = [] + + # Log the behaviour + if ensemble_nbest == 1: + t = type(ensemble_nbest) + self.logger.debug(f"Using behaviour when {t} for {ensemble_nbest}:{t}") + + # Read in previous state if any exists + # + # Depending on the dataset dimensions, regenerating every iteration, the + # predictions losses for self.read_preds is too computationally expensive + # As the ensemble builder is stateless (every time the ensemble builder gets + # resources from dask, it builds this object from scratch) + # we save the state of this dictionary to memory + # and read it if available + + # { + # "file_name": { + # Y_ENSEMBLE: np.ndarray + # Y_VALID: np.ndarray + # Y_TEST: np.ndarray + # } + # } + cached_preds, last_hash = self.cached_preds + self.read_preds: dict[str, dict[int, np.ndarray]] = cached_preds + self.last_hash: str | None = last_hash + + # { + # "file name": { + # "ens_loss": float + # "mtime_ens": str, + # "mtime_valid": str, + # "mtime_test": str, + # "seed": int, + # "num_run": int, + # } + # } + self.read_losses: dict[str, dict[str, Any]] = self.cached_losses + + @property + def cached_preds_path(self) -> Path: + """Path to the cached predictions we store between runs""" + return Path(self.backend.internals_directory) / "ensemble_read_preds.pkl" + + @property + def cached_losses_path(self) -> Path: + """Path to the cached losses we store between runs""" + return Path(self.backend.internals_directory) / "ensemble_read_losses.pkl" + + @property + def cached_preds(self) -> Tuple[dict, str]: + """""" + if self.cached_preds_path.exists(): + with self.cached_preds_path.open("rb") as memory: + preds, last_hash = pickle.load(memory) + return (preds, last_hash) + else: + return ({}, None) + + @property + def cached_losses(self) -> dict[str, dict[int, np.ndarray]]: + """""" + if self.cached_losses_path.exists(): + with self.cached_preds_path.open("rb") as memory: + losses = pickle.load(memory) + return losses + else: + return {} def run( self, @@ -287,7 +298,7 @@ def run( # ATTENTION: main will start from scratch; # all data structures are empty again try: - os.remove(self.ensemble_memory_file) + self.cached_preds_path.unlink() except: # noqa E722 pass @@ -431,10 +442,6 @@ def main(self, time_left, iteration, return_predictions): n_sel_test = [] n_sel_valid = [] - if os.environ.get("ENSEMBLE_KEEP_ALL_CANDIDATES"): - for candidate in candidate_models: - self._has_been_candidate.add(candidate) - # train ensemble ensemble = self.fit_ensemble(selected_keys=candidate_models) @@ -449,7 +456,7 @@ def main(self, time_left, iteration, return_predictions): self._delete_excess_models(selected_keys=candidate_models) # Save the read losses status for the next iteration - with open(self.ensemble_loss_file, "wb") as memory: + with open(self.cached_losses_path, "wb") as memory: pickle.dump(self.read_losses, memory) if ensemble is not None: @@ -483,7 +490,7 @@ def main(self, time_left, iteration, return_predictions): # The loaded predictions and hash can only 
be saved after the ensemble has been # built, because the hash is computed during the construction of the ensemble - with open(self.ensemble_memory_file, "wb") as memory: + with self.cached_preds_path.open("wb") as memory: pickle.dump((self.read_preds, self.last_hash), memory) if return_predictions: @@ -1209,9 +1216,6 @@ def _delete_excess_models(self, selected_keys: List[str]): if pred_path in selected_keys: continue - if pred_path in self._has_been_candidate: - continue - match = self.model_fn_re.search(pred_path) _seed = int(match.group(1)) _num_run = int(match.group(2)) From 07d2c5590a18988593daec4b96e0582d3c3ee731 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 22:17:19 +0200 Subject: [PATCH 014/117] Moved to cached properties --- autosklearn/ensemble_building/builder.py | 115 +++++++++++++---------- 1 file changed, 66 insertions(+), 49 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 20dd4d70a0..c0ab03a1c7 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -191,58 +191,73 @@ def __init__( # we save the state of this dictionary to memory # and read it if available - # { - # "file_name": { - # Y_ENSEMBLE: np.ndarray - # Y_VALID: np.ndarray - # Y_TEST: np.ndarray - # } - # } - cached_preds, last_hash = self.cached_preds - self.read_preds: dict[str, dict[int, np.ndarray]] = cached_preds - self.last_hash: str | None = last_hash - - # { - # "file name": { - # "ens_loss": float - # "mtime_ens": str, - # "mtime_valid": str, - # "mtime_test": str, - # "seed": int, - # "num_run": int, - # } - # } - self.read_losses: dict[str, dict[str, Any]] = self.cached_losses + self._run_predictions = None + self._last_hash = None + self._run_info = None + + # TODO remove + prior_predictions, last_hash = self.run_predictions + self.read_preds = prior_predictions + self.last_hash = last_hash + self.read_losses = self.run_info @property - def cached_preds_path(self) -> Path: + def run_predictions_path(self) -> Path: """Path to the cached predictions we store between runs""" return Path(self.backend.internals_directory) / "ensemble_read_preds.pkl" @property - def cached_losses_path(self) -> Path: + def run_info_path(self) -> Path: """Path to the cached losses we store between runs""" return Path(self.backend.internals_directory) / "ensemble_read_losses.pkl" @property - def cached_preds(self) -> Tuple[dict, str]: - """""" - if self.cached_preds_path.exists(): - with self.cached_preds_path.open("rb") as memory: - preds, last_hash = pickle.load(memory) - return (preds, last_hash) - else: - return ({}, None) + def run_predictions(self) -> Tuple[dict, str]: + """Get the cached predictions from previous runs + { + "file_name": { + Y_ENSEMBLE: np.ndarray + Y_VALID: np.ndarray + Y_TEST: np.ndarray + } + } + """ + if self._run_predictions is None: + if self.run_predictions_path.exists(): + with self.run_predictions_path.open("rb") as memory: + preds, last_hash = pickle.load(memory) + + self._run_predictions = preds + self._last_hash = last_hash + else: + self._run_predictions = {} + self._last_hash = "" + + return (self._run_predictions, self._last_hash) @property - def cached_losses(self) -> dict[str, dict[int, np.ndarray]]: - """""" - if self.cached_losses_path.exists(): - with self.cached_preds_path.open("rb") as memory: - losses = pickle.load(memory) - return losses - else: - return {} + def run_info(self) -> dict[str, dict[int, np.ndarray]]: + """Get the cached information from 
previous runs + { + "file name": { + "ens_loss": float + "mtime_ens": str, + "mtime_valid": str, + "mtime_test": str, + "seed": int, + "num_run": int, + } + } + """ + if self._run_info is None: + if self.run_info_path.exists(): + with self.run_info.open("rb") as memory: + info = pickle.load(memory) + self._run_info = info + else: + self._run_info = {} + + return self._run_info def run( self, @@ -253,9 +268,9 @@ def run( time_buffer=5, return_predictions: bool = False, ): - if time_left is None and end_at is None: raise ValueError("Must provide either time_left or end_at.") + elif time_left is not None and end_at is not None: raise ValueError("Cannot provide both time_left and end_at.") @@ -298,7 +313,7 @@ def run( # ATTENTION: main will start from scratch; # all data structures are empty again try: - self.cached_preds_path.unlink() + self.run_predictions_path.unlink() except: # noqa E722 pass @@ -340,7 +355,6 @@ def run( return [], self.ensemble_nbest, None, None, None def main(self, time_left, iteration, return_predictions): - # Pynisher jobs inside dask 'forget' # the logger configuration. So we have to set it up # accordingly @@ -455,9 +469,10 @@ def main(self, time_left, iteration, return_predictions): if self.max_resident_models is not None: self._delete_excess_models(selected_keys=candidate_models) - # Save the read losses status for the next iteration - with open(self.cached_losses_path, "wb") as memory: - pickle.dump(self.read_losses, memory) + # Save the read losses status for the next iteration, we should do this + # before doing predictions as this is a likely place of memory issues + with self.run_info_path.open("wb") as f: + pickle.dump(self.read_losses, f) if ensemble is not None: train_pred = self.predict( @@ -490,8 +505,9 @@ def main(self, time_left, iteration, return_predictions): # The loaded predictions and hash can only be saved after the ensemble has been # built, because the hash is computed during the construction of the ensemble - with self.cached_preds_path.open("wb") as memory: - pickle.dump((self.read_preds, self.last_hash), memory) + with self.run_predictions_path.open("wb") as f: + item = (self.read_preds, self.last_hash) + pickle.dump(item, f) if return_predictions: return ( @@ -545,7 +561,6 @@ def compute_loss_per_model(self) -> bool: bool Whether it successfully computed losses """ - self.logger.debug("Read ensemble data set predictions") if self.y_true_ensemble is None: @@ -1130,12 +1145,14 @@ def _add_ensemble_trajectory(self, train_pred, valid_pred, test_pred): train_pred = np.vstack( ((1 - train_pred).reshape((1, -1)), train_pred.reshape((1, -1))) ).transpose() + if valid_pred is not None and ( len(valid_pred.shape) == 1 or valid_pred.shape[1] == 1 ): valid_pred = np.vstack( ((1 - valid_pred).reshape((1, -1)), valid_pred.reshape((1, -1))) ).transpose() + if test_pred is not None and ( len(test_pred.shape) == 1 or test_pred.shape[1] == 1 ): From 8ac8ffec9664429c7d057eb08b50c567514ce862 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 23:17:40 +0200 Subject: [PATCH 015/117] Change List to list --- autosklearn/ensemble_building/builder.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index c0ab03a1c7..891fbec069 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, List, Optional, Tuple +from typing import 
Any, Tuple from pathlib import Path import glob @@ -882,8 +882,8 @@ def get_n_best_preds(self): return sorted_keys[:ensemble_n_best] def get_valid_test_preds( - self, selected_keys: List[str] - ) -> Tuple[List[str], List[str]]: + self, selected_keys: list[str] + ) -> Tuple[list[str], list[str]]: """Get valid and test predictions from disc and store them in self.read_preds Parameters @@ -1217,7 +1217,7 @@ def _get_list_of_sorted_preds(self): ) return sorted_keys - def _delete_excess_models(self, selected_keys: List[str]): + def _delete_excess_models(self, selected_keys: list[str]): """ Deletes models excess models on disc. self.max_models_on_disc defines the upper limit on how many models to keep. From 6472714a3a09fe725eb4edca5a0f06f0052a0a95 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 27 Mar 2022 23:33:04 +0200 Subject: [PATCH 016/117] Move to solely using cached properties --- autosklearn/ensemble_building/builder.py | 229 +++++++++--------- .../test_3_models/test_3_models.py | 52 ++-- 2 files changed, 135 insertions(+), 146 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 891fbec069..89a9b7af3a 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -2,7 +2,6 @@ from typing import Any, Tuple -from pathlib import Path import glob import gzip import logging.handlers @@ -16,6 +15,7 @@ import time import traceback import zlib +from pathlib import Path import numpy as np import pandas as pd @@ -115,7 +115,7 @@ def __init__( precision: int [16 | 32 | 64 | 128] = 32 precision of floats to read the predictions - memory_limit: Optional[int] = 1024 + memory_limit: int | None = 1024 memory limit in mb. If ``None``, no memory limit is enforced. 
read_at_most: int = 5 @@ -156,9 +156,6 @@ def __init__( # The starting time of the procedure self.start_time = 0 - # Hash of the last ensemble training data to identify it - self.last_hash = None - # The cached values of the true targets for the ensemble self.y_true_ensemble: int | None = None @@ -182,24 +179,14 @@ def __init__( t = type(ensemble_nbest) self.logger.debug(f"Using behaviour when {t} for {ensemble_nbest}:{t}") - # Read in previous state if any exists - # - # Depending on the dataset dimensions, regenerating every iteration, the - # predictions losses for self.read_preds is too computationally expensive - # As the ensemble builder is stateless (every time the ensemble builder gets - # resources from dask, it builds this object from scratch) - # we save the state of this dictionary to memory - # and read it if available - + # The cached set of run_predictions which could come from previous instances self._run_predictions = None + + # Hash of the last ensemble training data to identify it self._last_hash = None - self._run_info = None - # TODO remove - prior_predictions, last_hash = self.run_predictions - self.read_preds = prior_predictions - self.last_hash = last_hash - self.read_losses = self.run_info + # The cached info of runs which could come from previous instances + self._run_info = None @property def run_predictions_path(self) -> Path: @@ -212,7 +199,7 @@ def run_info_path(self) -> Path: return Path(self.backend.internals_directory) / "ensemble_read_losses.pkl" @property - def run_predictions(self) -> Tuple[dict, str]: + def run_predictions(self) -> dict[str, dict[int, np.ndarray]]: """Get the cached predictions from previous runs { "file_name": { @@ -223,20 +210,32 @@ def run_predictions(self) -> Tuple[dict, str]: } """ if self._run_predictions is None: - if self.run_predictions_path.exists(): - with self.run_predictions_path.open("rb") as memory: - preds, last_hash = pickle.load(memory) + self._run_predictions = {} + self._last_hash = "" - self._run_predictions = preds - self._last_hash = last_hash - else: - self._run_predictions = {} - self._last_hash = "" + path = self.run_predictions_path + if path.exists(): + with path.open("rb") as memory: + self._run_predictions, self._last_hash = pickle.load(memory) - return (self._run_predictions, self._last_hash) + return self._run_predictions @property - def run_info(self) -> dict[str, dict[int, np.ndarray]]: + def last_hash(self) -> str: + """Get the last hash associated with the run predictions""" + if self._last_hash is None: + self._run_predictions = {} + self._last_hash = "" + + path = self.run_predictions_path + if path.exists(): + with path.open("rb") as memory: + self._run_predictions, self._last_hash = pickle.load(memory) + + return self._last_hash + + @property + def run_info(self) -> dict[str, dict[str, Any]]: """Get the cached information from previous runs { "file name": { @@ -250,12 +249,12 @@ def run_info(self) -> dict[str, dict[int, np.ndarray]]: } """ if self._run_info is None: - if self.run_info_path.exists(): - with self.run_info.open("rb") as memory: - info = pickle.load(memory) - self._run_info = info - else: - self._run_info = {} + self._run_info = {} + + path = self.run_info_path + if path.exists(): + with path.open("rb") as memory: + self._run_info = pickle.load(memory) return self._run_info @@ -263,8 +262,8 @@ def run( self, iteration: int, pynisher_context: str, - time_left: Optional[float] = None, - end_at: Optional[float] = None, + time_left: float | None = None, + end_at: float | None = None, 
time_buffer=5, return_predictions: bool = False, ): @@ -373,7 +372,7 @@ def main(self, time_left, iteration, return_predictions): time_left - used_time, ) - # populates self.read_preds and self.read_losses + # populates self.run_predictions and self.run_info if not self.compute_loss_per_model(): if return_predictions: return ( @@ -401,7 +400,7 @@ def main(self, time_left, iteration, return_predictions): else: return self.ensemble_history, self.ensemble_nbest, None, None, None - # populates predictions in self.read_preds + # populates predictions in self.run_predictions # reduces selected models if file reading failed n_sel_valid, n_sel_test = self.get_valid_test_preds( selected_keys=candidate_models @@ -472,7 +471,7 @@ def main(self, time_left, iteration, return_predictions): # Save the read losses status for the next iteration, we should do this # before doing predictions as this is a likely place of memory issues with self.run_info_path.open("wb") as f: - pickle.dump(self.read_losses, f) + pickle.dump(self.run_info, f) if ensemble is not None: train_pred = self.predict( @@ -506,7 +505,7 @@ def main(self, time_left, iteration, return_predictions): # The loaded predictions and hash can only be saved after the ensemble has been # built, because the hash is computed during the construction of the ensemble with self.run_predictions_path.open("wb") as f: - item = (self.read_preds, self.last_hash) + item = (self.run_predictions, self.last_hash) pickle.dump(item, f) if return_predictions: @@ -548,13 +547,13 @@ def get_disk_consumption(self, pred_path): def compute_loss_per_model(self) -> bool: """Compute the loss of the predictions on ensemble building data set; - populates self.read_preds and self.read_losses + populates self.run_predictions and self.run_info Side-effects ------------ * Populates - `self.y_ens_files` all the ensemble predictions it could find for runs - - `self.read_losses` with the new losses it calculated + - `self.run_info` with the new losses it calculated Returns ------- @@ -619,8 +618,8 @@ def compute_loss_per_model(self) -> bool: ) continue - if not self.read_losses.get(y_ens_fn): - self.read_losses[y_ens_fn] = { + if not self.run_info.get(y_ens_fn): + self.run_info[y_ens_fn] = { "ens_loss": np.inf, "mtime_ens": 0, "mtime_valid": 0, @@ -636,14 +635,14 @@ def compute_loss_per_model(self) -> bool: # 3 - deleted from disk due to space constraints "loaded": 0, } - if not self.read_preds.get(y_ens_fn): - self.read_preds[y_ens_fn] = { + if not self.run_predictions.get(y_ens_fn): + self.run_predictions[y_ens_fn] = { Y_ENSEMBLE: None, Y_VALID: None, Y_TEST: None, } - if self.read_losses[y_ens_fn]["mtime_ens"] == mtime: + if self.run_info[y_ens_fn]["mtime_ens"] == mtime: # same time stamp; nothing changed; continue @@ -658,24 +657,24 @@ def compute_loss_per_model(self) -> bool: scoring_functions=None, ) - if np.isfinite(self.read_losses[y_ens_fn]["ens_loss"]): + if np.isfinite(self.run_info[y_ens_fn]["ens_loss"]): self.logger.debug( "Changing ensemble loss for file %s from %f to %f " "because file modification time changed? %f - %f", y_ens_fn, - self.read_losses[y_ens_fn]["ens_loss"], + self.run_info[y_ens_fn]["ens_loss"], loss, - self.read_losses[y_ens_fn]["mtime_ens"], + self.run_info[y_ens_fn]["mtime_ens"], os.path.getmtime(y_ens_fn), ) - self.read_losses[y_ens_fn]["ens_loss"] = loss + self.run_info[y_ens_fn]["ens_loss"] = loss # It is not needed to create the object here # To save memory, we just compute the loss. 
- self.read_losses[y_ens_fn]["mtime_ens"] = os.path.getmtime(y_ens_fn) - self.read_losses[y_ens_fn]["loaded"] = 2 - self.read_losses[y_ens_fn][ + self.run_info[y_ens_fn]["mtime_ens"] = os.path.getmtime(y_ens_fn) + self.run_info[y_ens_fn]["loaded"] = 2 + self.run_info[y_ens_fn][ "disc_space_cost_mb" ] = self.get_disk_consumption(y_ens_fn) @@ -687,18 +686,18 @@ def compute_loss_per_model(self) -> bool: y_ens_fn, traceback.format_exc(), ) - self.read_losses[y_ens_fn]["ens_loss"] = np.inf + self.run_info[y_ens_fn]["ens_loss"] = np.inf self.logger.debug( "Done reading %d new prediction files. Loaded %d predictions in " "total.", n_read_files, - np.sum([pred["loaded"] > 0 for pred in self.read_losses.values()]), + np.sum([pred["loaded"] > 0 for pred in self.run_info.values()]), ) return True def get_n_best_preds(self): """ - get best n predictions (i.e., keys of self.read_losses) + get best n predictions (i.e., keys of self.run_info) according to the loss on the "ensemble set" n: self.ensemble_nbest @@ -742,7 +741,7 @@ def get_n_best_preds(self): ) sorted_keys = [ (k, v["ens_loss"], v["num_run"]) - for k, v in self.read_losses.items() + for k, v in self.run_info.items() if v["seed"] == self.seed and v["num_run"] == 1 ] # reload predictions if losses changed over time and a model is @@ -776,7 +775,7 @@ def get_n_best_preds(self): v["ens_loss"], v["disc_space_cost_mb"], ] - for v in self.read_losses.values() + for v in self.run_info.values() if v["disc_space_cost_mb"] is not None ] max_consumption = max(c[1] for c in consumption) @@ -854,42 +853,43 @@ def get_n_best_preds(self): # remove loaded predictions for non-winning models for k in sorted_keys[ensemble_n_best:]: - if k in self.read_preds: - self.read_preds[k][Y_ENSEMBLE] = None - self.read_preds[k][Y_VALID] = None - self.read_preds[k][Y_TEST] = None - if self.read_losses[k]["loaded"] == 1: + if k in self.run_predictions: + self.run_predictions[k][Y_ENSEMBLE] = None + self.run_predictions[k][Y_VALID] = None + self.run_predictions[k][Y_TEST] = None + if self.run_info[k]["loaded"] == 1: self.logger.debug( "Dropping model %s (%d,%d) with loss %f.", k, - self.read_losses[k]["seed"], - self.read_losses[k]["num_run"], - self.read_losses[k]["ens_loss"], + self.run_info[k]["seed"], + self.run_info[k]["num_run"], + self.run_info[k]["ens_loss"], ) - self.read_losses[k]["loaded"] = 2 + self.run_info[k]["loaded"] = 2 # Load the predictions for the winning for k in sorted_keys[:ensemble_n_best]: if ( - k not in self.read_preds or self.read_preds[k][Y_ENSEMBLE] is None - ) and self.read_losses[k]["loaded"] != 3: - self.read_preds[k][Y_ENSEMBLE] = self._read_np_fn(k) + k not in self.run_predictions + or self.run_predictions[k][Y_ENSEMBLE] is None + ) and self.run_info[k]["loaded"] != 3: + self.run_predictions[k][Y_ENSEMBLE] = self._read_np_fn(k) # No need to load valid and test here because they are loaded # only if the model ends up in the ensemble - self.read_losses[k]["loaded"] = 1 + self.run_info[k]["loaded"] = 1 - # return keys of self.read_losses with lowest losses + # return keys of self.run_info with lowest losses return sorted_keys[:ensemble_n_best] def get_valid_test_preds( self, selected_keys: list[str] ) -> Tuple[list[str], list[str]]: - """Get valid and test predictions from disc and store them in self.read_preds + """Get valid and test predictions from disc and store them in self.run_predictions Parameters --------- selected_keys: list - list of selected keys of self.read_preds + list of selected keys of self.run_predictions Return ------ @@ 
-906,15 +906,15 @@ def get_valid_test_preds( glob.escape(self.backend.get_runs_directory()), "%d_%d_%s" % ( - self.read_losses[k]["seed"], - self.read_losses[k]["num_run"], - self.read_losses[k]["budget"], + self.run_info[k]["seed"], + self.run_info[k]["num_run"], + self.run_info[k]["budget"], ), "predictions_valid_%d_%d_%s.npy*" % ( - self.read_losses[k]["seed"], - self.read_losses[k]["num_run"], - self.read_losses[k]["budget"], + self.run_info[k]["seed"], + self.run_info[k]["num_run"], + self.run_info[k]["budget"], ), ) ) @@ -928,15 +928,15 @@ def get_valid_test_preds( glob.escape(self.backend.get_runs_directory()), "%d_%d_%s" % ( - self.read_losses[k]["seed"], - self.read_losses[k]["num_run"], - self.read_losses[k]["budget"], + self.run_info[k]["seed"], + self.run_info[k]["num_run"], + self.run_info[k]["budget"], ), "predictions_test_%d_%d_%s.npy*" % ( - self.read_losses[k]["seed"], - self.read_losses[k]["num_run"], - self.read_losses[k]["budget"], + self.run_info[k]["seed"], + self.run_info[k]["num_run"], + self.run_info[k]["budget"], ), ) ) @@ -954,17 +954,17 @@ def get_valid_test_preds( else: valid_fn = valid_fn[0] if ( - self.read_losses[k]["mtime_valid"] == os.path.getmtime(valid_fn) - and k in self.read_preds - and self.read_preds[k][Y_VALID] is not None + self.run_info[k]["mtime_valid"] == os.path.getmtime(valid_fn) + and k in self.run_predictions + and self.run_predictions[k][Y_VALID] is not None ): success_keys_valid.append(k) continue try: y_valid = self._read_np_fn(valid_fn) - self.read_preds[k][Y_VALID] = y_valid + self.run_predictions[k][Y_VALID] = y_valid success_keys_valid.append(k) - self.read_losses[k]["mtime_valid"] = os.path.getmtime(valid_fn) + self.run_info[k]["mtime_valid"] = os.path.getmtime(valid_fn) except Exception: self.logger.warning( "Error loading %s: %s", valid_fn, traceback.format_exc() @@ -978,17 +978,17 @@ def get_valid_test_preds( else: test_fn = test_fn[0] if ( - self.read_losses[k]["mtime_test"] == os.path.getmtime(test_fn) - and k in self.read_preds - and self.read_preds[k][Y_TEST] is not None + self.run_info[k]["mtime_test"] == os.path.getmtime(test_fn) + and k in self.run_predictions + and self.run_predictions[k][Y_TEST] is not None ): success_keys_test.append(k) continue try: y_test = self._read_np_fn(test_fn) - self.read_preds[k][Y_TEST] = y_test + self.run_predictions[k][Y_TEST] = y_test success_keys_test.append(k) - self.read_losses[k]["mtime_test"] = os.path.getmtime(test_fn) + self.run_info[k]["mtime_test"] = os.path.getmtime(test_fn) except Exception: self.logger.warning( "Error loading %s: %s", test_fn, traceback.format_exc() @@ -1001,19 +1001,19 @@ def fit_ensemble(self, selected_keys: list): Parameters --------- selected_keys: list - list of selected keys of self.read_losses + list of selected keys of self.run_info Returns ------- ensemble: EnsembleSelection trained Ensemble """ - predictions_train = [self.read_preds[k][Y_ENSEMBLE] for k in selected_keys] + predictions_train = [self.run_predictions[k][Y_ENSEMBLE] for k in selected_keys] include_num_runs = [ ( - self.read_losses[k]["seed"], - self.read_losses[k]["num_run"], - self.read_losses[k]["budget"], + self.run_info[k]["seed"], + self.run_info[k]["num_run"], + self.run_info[k]["budget"], ) for k in selected_keys ] @@ -1033,7 +1033,8 @@ def fit_ensemble(self, selected_keys: list): ) return None - self.last_hash = current_hash + + self._last_hash = current_hash ensemble = EnsembleSelection( ensemble_size=self.ensemble_size, @@ -1089,7 +1090,7 @@ def predict( ensemble: 
EnsembleSelection trained Ensemble selected_keys: list - list of selected keys of self.read_losses + list of selected keys of self.run_info n_preds: int number of prediction models used for ensemble building same number of predictions on valid and test are necessary @@ -1108,7 +1109,7 @@ def predict( pred_set = Y_TEST else: pred_set = Y_ENSEMBLE - predictions = [self.read_preds[k][pred_set] for k in selected_keys] + predictions = [self.run_predictions[k][pred_set] for k in selected_keys] if n_preds == len(predictions): y = ensemble.predict(predictions) @@ -1196,7 +1197,7 @@ def _add_ensemble_trajectory(self, train_pred, valid_pred, test_pred): def _get_list_of_sorted_preds(self): """ Returns a list of sorted predictions in descending order - Losses are taken from self.read_losses. + Losses are taken from self.run_info. Parameters ---------- @@ -1209,7 +1210,7 @@ def _get_list_of_sorted_preds(self): # Sort by loss - smaller is better! sorted_keys = list( sorted( - [(k, v["ens_loss"], v["num_run"]) for k, v in self.read_losses.items()], + [(k, v["ens_loss"], v["num_run"]) for k, v in self.run_info.items()], # Sort by loss as priority 1 and then by num_run on a ascending order # We want small num_run first key=lambda x: (x[1], x[2]), @@ -1223,9 +1224,7 @@ def _delete_excess_models(self, selected_keys: list[str]): defines the upper limit on how many models to keep. Any additional model with a worst loss than the top self.max_models_on_disc is deleted. - """ - # Loop through the files currently in the directory for pred_path in self.y_ens_files: @@ -1247,9 +1246,9 @@ def _delete_excess_models(self, selected_keys: list[str]): os.rename(numrun_dir, numrun_dir + ".old") shutil.rmtree(numrun_dir + ".old") self.logger.info("Deleted files of non-candidate model %s", pred_path) - self.read_losses[pred_path]["disc_space_cost_mb"] = None - self.read_losses[pred_path]["loaded"] = 3 - self.read_losses[pred_path]["ens_loss"] = np.inf + self.run_info[pred_path]["disc_space_cost_mb"] = None + self.run_info[pred_path]["loaded"] = 3 + self.run_info[pred_path]["ens_loss"] = np.inf except Exception as e: self.logger.error( "Failed to delete files of non-candidate model %s due" diff --git a/test/test_ensemble_builder/test_3_models/test_3_models.py b/test/test_ensemble_builder/test_3_models/test_3_models.py index 73a9462093..3481897a1d 100644 --- a/test/test_ensemble_builder/test_3_models/test_3_models.py +++ b/test/test_ensemble_builder/test_3_models/test_3_models.py @@ -29,19 +29,19 @@ def test_read(ensemble_backend: Backend) -> None: ) success = ensbuilder.compute_loss_per_model() - assert success, f"read_preds = {str(ensbuilder.read_preds)}" + assert success, f"run_predictions = {str(ensbuilder.run_predictions)}" - assert len(ensbuilder.read_preds) == 3, ensbuilder.read_preds.keys() - assert len(ensbuilder.read_losses) == 3, ensbuilder.read_losses.keys() + assert len(ensbuilder.run_predictions) == 3, ensbuilder.run_predictions.keys() + assert len(ensbuilder.run_info) == 3, ensbuilder.run_info.keys() runsdir = Path(ensemble_backend.get_runs_directory()) preds_1 = runsdir / "0_1_0.0" / "predictions_ensemble_0_1_0.0.npy" preds_2 = runsdir / "0_2_0.0" / "predictions_ensemble_0_2_0.0.npy" preds_3 = runsdir / "0_3_100.0" / "predictions_ensemble_0_3_100.0.npy" - assert ensbuilder.read_losses[str(preds_1)]["ens_loss"] == 0.5 - assert ensbuilder.read_losses[str(preds_2)]["ens_loss"] == 0.0 - assert ensbuilder.read_losses[str(preds_3)]["ens_loss"] == 0.0 + assert ensbuilder.run_info[str(preds_1)]["ens_loss"] == 0.5 + 
assert ensbuilder.run_info[str(preds_2)]["ens_loss"] == 0.0 + assert ensbuilder.run_info[str(preds_3)]["ens_loss"] == 0.0 @parametrize( @@ -175,17 +175,13 @@ def test_fall_back_nbest(ensemble_backend: Backend) -> None: ) ensbuilder.compute_loss_per_model() - print() - print(ensbuilder.read_preds.keys()) - print(ensbuilder.read_losses.keys()) - print(ensemble_backend.temporary_directory) for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"]: filename = os.path.join( ensemble_backend.temporary_directory, f".auto-sklearn/runs/{model}/predictions_ensemble_{model}.npy", ) - ensbuilder.read_losses[filename]["ens_loss"] = -1 + ensbuilder.run_info[filename]["ens_loss"] = -1 sel_keys = ensbuilder.get_n_best_preds() @@ -236,20 +232,20 @@ def test_get_valid_test_preds(ensemble_backend: Backend) -> None: ensbuilder.get_valid_test_preds(selected_keys=sel_keys) # Number of read files should be three and contain those of the models in the setup - assert set(ensbuilder.read_preds.keys()) == set(paths) + assert set(ensbuilder.run_predictions.keys()) == set(paths) selected = sel_keys non_selected = set(paths) - set(sel_keys) # not selected --> should still be None for key in non_selected: - assert ensbuilder.read_preds[key][Y_VALID] is None - assert ensbuilder.read_preds[key][Y_TEST] is None + assert ensbuilder.run_predictions[key][Y_VALID] is None + assert ensbuilder.run_predictions[key][Y_TEST] is None # selected --> read valid and test predictions for key in selected: - assert ensbuilder.read_preds[key][Y_VALID] is not None - assert ensbuilder.read_preds[key][Y_TEST] is not None + assert ensbuilder.run_predictions[key][Y_VALID] is not None + assert ensbuilder.run_predictions[key][Y_TEST] is not None @parametrize_with_cases("ensemble_backend", cases=cases) @@ -317,7 +313,7 @@ def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: # since d2 provides perfect predictions # it should get a higher weight # so that y_valid should be exactly y_valid_d2 - y_valid_d2 = ensbuilder.read_preds[d2][Y_VALID][:, 1] + y_valid_d2 = ensbuilder.run_predictions[d2][Y_VALID][:, 1] np.testing.assert_array_almost_equal(y_valid, y_valid_d2) @@ -331,7 +327,7 @@ def test_main(ensemble_backend: Backend) -> None: Expects ------- - * There should be "read_preds" and "read_losses" saved to file + * There should be "run_predictions" and "run_info" saved to file * There should be 3 model reads * There should be a hash for the preds read in * The true targets should have been read in @@ -357,14 +353,12 @@ def test_main(ensemble_backend: Backend) -> None: ) internals_dir = Path(ensemble_backend.internals_directory) - read_preds_path = internals_dir / "ensemble_read_preds.pkl" - read_losses_path = internals_dir / "ensemble_read_losses.pkl" - assert read_preds_path.exists(), list(internals_dir.iterdir()) - assert read_losses_path.exists(), list(internals_dir.iterdir()) + assert ensbuilder.run_predictions_path.exists(), list(internals_dir.iterdir()) + assert ensbuilder.run_info_path.exists(), list(internals_dir.iterdir()) # There should be three preds read - assert len(ensbuilder.read_preds) == 3 + assert len(ensbuilder.run_predictions) == 3 assert ensbuilder.last_hash is not None assert ensbuilder.y_true_ensemble is not None @@ -431,10 +425,6 @@ def test_limit( ensbuilder.predict = Mock(side_effect=MemoryError) # type: ignore ensbuilder.logger = mock_logger # Mock its logger - internal_dir = Path(ensemble_backend.internals_directory) - read_losses_file = internal_dir / "ensemble_read_losses.pkl" - read_preds_file = 
internal_dir / "ensemble_read_preds.pkl" - def mtime_mock(filename: str) -> float: """TODO, not really sure why we have to force these""" path = Path(filename) @@ -464,8 +454,8 @@ def mtime_mock(filename: str) -> float: for i, exp_state in enumerate(intermediate_states, start=1): ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - assert read_losses_file.exists() - assert not read_preds_file.exists() + assert ensbuilder.run_info_path.exists() + assert not ensbuilder.run_predictions_path.exists() assert mock_logger.warning.call_count == i # type: ignore @@ -476,8 +466,8 @@ def mtime_mock(filename: str) -> float: # change it's internal state ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - assert read_losses_file.exists() - assert not read_preds_file.exists() + assert ensbuilder.run_info_path.exists() + assert not ensbuilder.run_predictions_path.exists() assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == final_state From 36d7dd6b31c6e746fcb36571ead5d142c99c5b8d Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 28 Mar 2022 00:04:09 +0200 Subject: [PATCH 017/117] Add disk util file with `sizeof` --- autosklearn/util/disk.py | 42 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 autosklearn/util/disk.py diff --git a/autosklearn/util/disk.py b/autosklearn/util/disk.py new file mode 100644 index 0000000000..af59abc3f7 --- /dev/null +++ b/autosklearn/util/disk.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from pathlib import Path + +sizes = { + "B": 0, + "KB": 1, + "MB": 2, + "GB": 3, + "TB": 4, +} + + +def sizeof(path: Path | str, unit: str = "B") -> int: + """Get the size of some path object + + Parameters + ---------- + path : Path | str + The path of the file or directory to get the size of + + unit : "B" | "KB" | "MB" | "GB" | "TB" = "B" + What unit to get the answer in + + Returns + ------- + int + The size of the folder/file in the given units + """ + if unit not in sizes: + raise ValueError(f"Not a known unit {unit}") + + if not isinstance(path, Path): + path = Path(path) + + if path.is_file(): + size = path.stat().st_size + else: + size = sum(f.stat().st_size for f in path.glob("**/*") if f.is_file()) + + power = sizes[unit] + return round(size / (1024**power)) From 5c9842f31ae8c79860caa168fcbdb4bb7dae9311 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 28 Mar 2022 00:15:28 +0200 Subject: [PATCH 018/117] Update tests to use cached mechanism --- autosklearn/ensemble_building/builder.py | 77 +++++++++++++++++-- autosklearn/ensemble_building/manager.py | 10 +-- .../test_ensemble_builder.py | 49 +++++++----- 3 files changed, 103 insertions(+), 33 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 89a9b7af3a..30f0e32f77 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Tuple +from typing import Any, Dict, List, Optional, Tuple import glob import gzip @@ -180,13 +180,13 @@ def __init__( self.logger.debug(f"Using behaviour when {t} for {ensemble_nbest}:{t}") # The cached set of run_predictions which could come from previous instances - self._run_predictions = None + self._run_predictions: dict[str, dict[int, np.ndarray]] | None = None # Hash of the last ensemble training data to identify it - self._last_hash = None + self._last_hash: str | None = None # The cached info of runs which could 
come from previous instances
-        self._run_info = None
+        self._run_info: dict[str, dict[str, Any]] | None = None

     @property
     def run_predictions_path(self) -> Path:
@@ -264,9 +264,43 @@ def run(
         pynisher_context: str,
         time_left: float | None = None,
         end_at: float | None = None,
-        time_buffer=5,
+        time_buffer: int = 5,
         return_predictions: bool = False,
-    ):
+    ) -> Tuple[
+        list[dict[str, Any]],
+        int,
+        np.ndarray | None,
+        np.ndarray | None,
+        np.ndarray | None,
+    ]:
+        """Run the ensemble building process
+
+        Parameters
+        ----------
+        iteration : int
+            What iteration to associate with this run
+
+        pynisher_context : str
+            The pynisher context to run in
+
+        time_left : float | None = None
+            How much time should be left for this run. Either this or `end_at` must
+            be provided.
+
+        end_at : float | None = None
+            When this run should end. Either this or `time_left` must be provided.
+
+        time_buffer : int = 5
+            How much extra time to add as a buffer to this run. This means there is
+            always some amount of time to do something useful.
+
+        return_predictions : bool = False
+            Whether run should also return predictions
+
+        Returns
+        -------
+        (ensemble_history, nbest, train_preds, valid_preds, test_preds)
+        """
         if time_left is None and end_at is None:
             raise ValueError("Must provide either time_left or end_at.")
@@ -353,7 +387,35 @@ def run(
         return [], self.ensemble_nbest, None, None, None

-    def main(self, time_left, iteration, return_predictions):
+    def main(
+        self,
+        time_left: float,
+        iteration: int,
+        return_predictions: bool = False,
+    ) -> Tuple[
+        list[dict[str, Any]],
+        int,
+        np.ndarray | None,
+        np.ndarray | None,
+        np.ndarray | None,
+    ]:
+        """Run the main loop of ensemble building
+
+        Parameters
+        ----------
+        time_left : float
+            How much time is left for this run
+
+        iteration : int
+            The iteration of this run
+
+        return_predictions : bool = False
+            Whether to return predictions or not
+
+        Returns
+        -------
+        (ensemble_history, nbest, train_preds, valid_preds, test_preds)
+        """
         # Pynisher jobs inside dask 'forget'
         # the logger configuration. So we have to set it up
         # accordingly
@@ -1208,6 +1270,7 @@ def _get_list_of_sorted_preds(self):
         sorted_keys: list
         """
         # Sort by loss - smaller is better!
+ print(self.run_info) sorted_keys = list( sorted( [(k, v["ens_loss"], v["num_run"]) for k, v in self.run_info.items()], diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index c443cc242a..a062a3a3e2 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import List, Optional, Tuple, Union +from typing import Any, Optional, Tuple, Union import logging.handlers import time @@ -277,11 +277,11 @@ def fit_and_return_ensemble( memory_limit: Optional[int] = None, random_state: Optional[Union[int, np.random.RandomState]] = None, ) -> Tuple[ - List[Tuple[int, float, float, float]], + list[dict[str, Any]], int, - Optional[np.ndarray], - Optional[np.ndarray], - Optional[np.ndarray], + np.ndarray | None, + np.ndarray | None, + np.ndarray | None, ]: """ diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index c621a18262..89f6deef00 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -5,6 +5,8 @@ import time from pathlib import Path +import numpy as np + from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.data.xy_data_manager import XYDataManager @@ -93,7 +95,7 @@ def test_max_models_on_disc_with_float_selects_expected_models( """ # These are arranged so the last one is best, with the lose loss - losses = [ + run_info = [ { "ens_loss": 10 * -n, "num_run": n, @@ -106,11 +108,11 @@ def test_max_models_on_disc_with_float_selects_expected_models( mem_largest = mem_model * mem_largest_mult if largest_is_best: - losses[-1]["disc_space_cost_mb"] = mem_largest + run_info[-1]["disc_space_cost_mb"] = mem_largest else: - losses[0]["disc_space_cost_mb"] = mem_largest + run_info[0]["disc_space_cost_mb"] = mem_largest - nbest = sorted(losses, key=lambda item: item["ens_loss"])[:n_expected] + nbest = sorted(run_info, key=lambda item: item["ens_loss"])[:n_expected] mem_for_nbest = sum(item["disc_space_cost_mb"] for item in nbest) slack = mem_largest # Slack introduced is the size of the largest model @@ -127,15 +129,18 @@ def test_max_models_on_disc_with_float_selects_expected_models( ) # Enter the models, with each model being progressibly better - ensbuilder.read_losses = {f"pred{i}": v for i, v in enumerate(losses, start=1)} + ensbuilder._run_info = { + f"pred{i}": v for i, v in enumerate(run_info, start=1) + } + # Make the last model twice as large if largest_is_best: - ensbuilder.read_losses[f"pred{n_models}"]["disc_space_cost_mb"] = mem_largest + ensbuilder.run_info[f"pred{n_models}"]["disc_space_cost_mb"] = mem_largest else: - ensbuilder.read_losses["pred1"]["disc_space_cost_mb"] = mem_largest + ensbuilder.run_info["pred1"]["disc_space_cost_mb"] = mem_largest - ensbuilder.read_preds = { - f"pred{n}": {Y_ENSEMBLE: True} for n in range(1, n_models + 1) + ensbuilder._run_predictions = { + f"pred{n}": {Y_ENSEMBLE: np.array([1])} for n in range(1, n_models + 1) } sel_keys = ensbuilder.get_n_best_preds() @@ -178,7 +183,7 @@ def test_max_models_on_disc_float_always_preserves_best_model( memory_limit=None, ) - read_losses = { + run_info = { f"pred{n}": { "ens_loss": 10 * -n, "num_run": n + 1, @@ -188,10 +193,12 @@ def test_max_models_on_disc_float_always_preserves_best_model( } for n in range(n_models) } - best_model = min(read_losses, 
key=lambda m: read_losses[m]["ens_loss"]) + best_model = min(run_info, key=lambda m: run_info[m]["ens_loss"]) - ensbuilder.read_losses = read_losses - ensbuilder.read_preds = {f"pred{n}": {Y_ENSEMBLE: True} for n in range(n_models)} + ensbuilder._run_info = run_info + ensbuilder._run_predictions = { + f"pred{n}": {Y_ENSEMBLE: np.array([1])} for n in range(n_models) + } sel_keys = ensbuilder.get_n_best_preds() assert [best_model] == sel_keys @@ -231,16 +238,16 @@ def test_performance_range_threshold( seed=DEFAULT_SEED, performance_range_threshold=performance_range_threshold, ) - ensbuilder.read_losses = { + ensbuilder._run_info = { "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": DEFAULT_SEED}, "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": DEFAULT_SEED}, "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": DEFAULT_SEED}, "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": DEFAULT_SEED}, "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": DEFAULT_SEED}, } - ensbuilder.read_preds = { - name: {preds_key: True for preds_key in (Y_ENSEMBLE, Y_VALID, Y_TEST)} - for name in ensbuilder.read_losses + ensbuilder._run_predictions = { + name: {preds_key: np.array([1]) for preds_key in (Y_ENSEMBLE, Y_VALID, Y_TEST)} + for name in ensbuilder._run_info } sel_keys = ensbuilder.get_n_best_preds() @@ -294,16 +301,16 @@ def test_performance_range_threshold_with_ensemble_nbest( performance_range_threshold=performance_range_threshold, max_models_on_disc=None, ) - ensbuilder.read_losses = { + ensbuilder._run_info = { "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": DEFAULT_SEED}, "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": DEFAULT_SEED}, "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": DEFAULT_SEED}, "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": DEFAULT_SEED}, "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": DEFAULT_SEED}, } - ensbuilder.read_preds = { - name: {pred_name: True for pred_name in (Y_ENSEMBLE, Y_VALID, Y_TEST)} - for name in ensbuilder.read_losses + ensbuilder._run_predictions = { + name: {pred_name: np.array([1]) for pred_name in (Y_ENSEMBLE, Y_VALID, Y_TEST)} + for name in ensbuilder._run_info } sel_keys = ensbuilder.get_n_best_preds() From 1de376c77d196c8d7e16deff3e357f4f1750b9a8 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 28 Mar 2022 00:38:08 +0200 Subject: [PATCH 019/117] Switch `sizeof` for disk consumption --- autosklearn/ensemble_building/builder.py | 7 +++---- autosklearn/util/disk.py | 7 ++++--- .../test_3_models/test_3_models.py | 15 +++++++++------ 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 30f0e32f77..1bab4bc98e 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -30,6 +30,7 @@ from autosklearn.metrics import Scorer, calculate_loss, calculate_score from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules +from autosklearn.util.disk import sizeof Y_ENSEMBLE = 0 Y_VALID = 1 @@ -736,9 +737,8 @@ def compute_loss_per_model(self) -> bool: # To save memory, we just compute the loss. 
self.run_info[y_ens_fn]["mtime_ens"] = os.path.getmtime(y_ens_fn) self.run_info[y_ens_fn]["loaded"] = 2 - self.run_info[y_ens_fn][ - "disc_space_cost_mb" - ] = self.get_disk_consumption(y_ens_fn) + mem_usage = round(sizeof(y_ens_fn, unit="MB"), 2) + self.run_info[y_ens_fn]["disc_space_cost_mb"] = mem_usage n_read_files += 1 @@ -1270,7 +1270,6 @@ def _get_list_of_sorted_preds(self): sorted_keys: list """ # Sort by loss - smaller is better! - print(self.run_info) sorted_keys = list( sorted( [(k, v["ens_loss"], v["num_run"]) for k, v in self.run_info.items()], diff --git a/autosklearn/util/disk.py b/autosklearn/util/disk.py index af59abc3f7..0b663c26d3 100644 --- a/autosklearn/util/disk.py +++ b/autosklearn/util/disk.py @@ -1,6 +1,7 @@ from __future__ import annotations from pathlib import Path +import math sizes = { "B": 0, @@ -11,7 +12,7 @@ } -def sizeof(path: Path | str, unit: str = "B") -> int: +def sizeof(path: Path | str, unit: str = "B") -> float: """Get the size of some path object Parameters @@ -24,7 +25,7 @@ def sizeof(path: Path | str, unit: str = "B") -> int: Returns ------- - int + float The size of the folder/file in the given units """ if unit not in sizes: @@ -39,4 +40,4 @@ def sizeof(path: Path | str, unit: str = "B") -> int: size = sum(f.stat().st_size for f in path.glob("**/*") if f.is_file()) power = sizes[unit] - return round(size / (1024**power)) + return size / math.pow(1024, power) diff --git a/test/test_ensemble_builder/test_3_models/test_3_models.py b/test/test_ensemble_builder/test_3_models/test_3_models.py index 3481897a1d..c0d0dfdaa6 100644 --- a/test/test_ensemble_builder/test_3_models/test_3_models.py +++ b/test/test_ensemble_builder/test_3_models/test_3_models.py @@ -115,10 +115,10 @@ def test_nbest( (4, 2), (1, 1), # If Float, translate float to # models. - # below, mock of each file is 100 Mb and 4 files .model and .npy (test/val/pred) - # per run (except for run3, there they are 5). Now, it takes 500MB for run 3 and - # another 500 MB of slack because we keep as much space as the largest model - # available as slack + # We mock so sizeof will return 500MB, this means that 500MB is required per run + # and we also need the 500MB extra as slack room. This means we can't fit 2 + # models in 1499MB but we can in 1500MB. 
We also don't include the dummy + # model which explains why even with 9999MB, we still only have 2 (1499.0, 1), (1500.0, 2), (9999.0, 2), @@ -156,10 +156,13 @@ def test_max_models_on_disc( max_models_on_disc=max_models_on_disc, ) - with patch("os.path.getsize") as mock: - mock.return_value = 100 * 1024 * 1024 + with patch("autosklearn.ensemble_building.builder.sizeof") as mock: + mock.return_value = 500 + ensbuilder.compute_loss_per_model() sel_keys = ensbuilder.get_n_best_preds() + assert mock.called + print(mock.call_args_list) assert len(sel_keys) == expected From 23de0fbc594b83fcc781b790cfc9012d0d69744c Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 28 Mar 2022 00:38:39 +0200 Subject: [PATCH 020/117] Remove disk consumption --- autosklearn/ensemble_building/builder.py | 26 ------------------------ 1 file changed, 26 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 1bab4bc98e..224c721080 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -582,32 +582,6 @@ def main( else: return self.ensemble_history, self.ensemble_nbest, None, None, None - def get_disk_consumption(self, pred_path): - """ - gets the cost of a model being on disc - """ - - match = self.model_fn_re.search(pred_path) - if not match: - raise ValueError("Invalid path format %s" % pred_path) - _seed = int(match.group(1)) - _num_run = int(match.group(2)) - _budget = float(match.group(3)) - - stored_files_for_run = os.listdir( - self.backend.get_numrun_directory(_seed, _num_run, _budget) - ) - stored_files_for_run = [ - os.path.join( - self.backend.get_numrun_directory(_seed, _num_run, _budget), file_name - ) - for file_name in stored_files_for_run - ] - this_model_cost = sum([os.path.getsize(path) for path in stored_files_for_run]) - - # get the megabytes - return round(this_model_cost / math.pow(1024, 2), 2) - def compute_loss_per_model(self) -> bool: """Compute the loss of the predictions on ensemble building data set; populates self.run_predictions and self.run_info From e34100d720f553934f9a88a7465a0836296b8101 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 28 Mar 2022 00:39:35 +0200 Subject: [PATCH 021/117] Remove unneeded function --- autosklearn/ensemble_building/builder.py | 35 ++++++------------------ 1 file changed, 9 insertions(+), 26 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 224c721080..1fba76ce62 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -743,8 +743,15 @@ def get_n_best_preds(self): ->Any model that is not best is candidate to deletion if max models in disc is exceeded. """ - - sorted_keys = self._get_list_of_sorted_preds() + # Sort by loss - smaller is better! + sorted_keys = list( + sorted( + [(k, v["ens_loss"], v["num_run"]) for k, v in self.run_info.items()], + # Sort by loss as priority 1 and then by num_run on a ascending order + # We want small num_run first + key=lambda x: (x[1], x[2]), + ) + ) # number of models available num_keys = len(sorted_keys) @@ -1230,30 +1237,6 @@ def _add_ensemble_trajectory(self, train_pred, valid_pred, test_pred): self.ensemble_history.append(performance_stamp) - def _get_list_of_sorted_preds(self): - """ - Returns a list of sorted predictions in descending order - Losses are taken from self.run_info. 
-
-        Parameters
-        ----------
-        None
-
-        Return
-        ------
-        sorted_keys: list
-        """
-        # Sort by loss - smaller is better!
-        sorted_keys = list(
-            sorted(
-                [(k, v["ens_loss"], v["num_run"]) for k, v in self.run_info.items()],
-                # Sort by loss as priority 1 and then by num_run on a ascending order
-                # We want small num_run first
-                key=lambda x: (x[1], x[2]),
-            )
-        )
-        return sorted_keys
-
     def _delete_excess_models(self, selected_keys: list[str]):
         """
         Deletes models excess models on disc. self.max_models_on_disc
         defines the upper limit on how many models to keep.

From 2d90370943c1a400cdcd76e94776caa0a052be9a Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Mon, 28 Mar 2022 00:48:47 +0200
Subject: [PATCH 022/117] Add type hints and documentation

---
 autosklearn/ensemble_building/builder.py | 97 ++++++++++++++----------
 1 file changed, 55 insertions(+), 42 deletions(-)

diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py
index 1fba76ce62..79459b868c 100644
--- a/autosklearn/ensemble_building/builder.py
+++ b/autosklearn/ensemble_building/builder.py
@@ -1,11 +1,10 @@
 from __future__ import annotations

-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Tuple

 import glob
 import gzip
 import logging.handlers
-import math
 import multiprocessing
 import numbers
 import os
@@ -28,9 +27,9 @@
 from autosklearn.constants import BINARY_CLASSIFICATION
 from autosklearn.ensembles.ensemble_selection import EnsembleSelection
 from autosklearn.metrics import Scorer, calculate_loss, calculate_score
+from autosklearn.util.disk import sizeof
 from autosklearn.util.logging_ import get_named_client_logger
 from autosklearn.util.parallel import preload_modules
-from autosklearn.util.disk import sizeof

 Y_ENSEMBLE = 0
 Y_VALID = 1
@@ -731,17 +730,19 @@ def compute_loss_per_model(self) -> bool:
         )
         return True

-    def get_n_best_preds(self):
-        """
-        get best n predictions (i.e., keys of self.run_info)
-        according to the loss on the "ensemble set"
-        n: self.ensemble_nbest
+    def get_n_best_preds(self) -> list[str]:
+        """Get best n predictions according to the loss on the "ensemble set"

         Side effects:
-        ->Define the n-best models to use in ensemble
-        ->Only the best models are loaded
-        ->Any model that is not best is candidate to deletion
-        if max models in disc is exceeded.
+        * Define the n-best models to use in ensemble
+        * Only the best models are loaded
+        * Any model that is not best is deletable if max models in disc is exceeded.
+
+        Returns
+        -------
+        list[str]
+            Returns the paths of the selected models which are used as keys in
+            `run_predictions` and `run_info`
         """
         # Sort by loss - smaller is better!
         sorted_keys = list(
             sorted(
                 [(k, v["ens_loss"], v["num_run"]) for k, v in self.run_info.items()],
                 # Sort by loss as priority 1 and then by num_run on a ascending order
                 # We want small num_run first
                 key=lambda x: (x[1], x[2]),
             )
         )
         # number of models available
         num_keys = len(sorted_keys)
@@ -925,20 +926,21 @@ def get_n_best_preds(self):
         return sorted_keys[:ensemble_n_best]

     def get_valid_test_preds(
-        self, selected_keys: list[str]
+        self,
+        selected_keys: list[str],
     ) -> Tuple[list[str], list[str]]:
-        """Get valid and test predictions from disc and store them in self.run_predictions
+        """Get valid and test predictions from disc and store in self.run_predictions

         Parameters
-        ---------
+        ----------
         selected_keys: list
             list of selected keys of self.run_predictions

         Return
         ------
-        success_keys:
-            all keys in selected keys for which we could read the valid and
-            test predictions
+        keys_valid: list[str], keys_test: list[str]
+            All keys in selected keys for which we could read the valid and test
+            predictions.
""" success_keys_valid = [] success_keys_test = [] @@ -1039,17 +1041,18 @@ def get_valid_test_preds( return success_keys_valid, success_keys_test - def fit_ensemble(self, selected_keys: list): - """ + def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: + """TODO + Parameters - --------- - selected_keys: list - list of selected keys of self.run_info + ---------- + selected_keys: list[str] + List of selected keys of self.run_info Returns ------- ensemble: EnsembleSelection - trained Ensemble + The trained ensemble """ predictions_train = [self.run_predictions[k][Y_ENSEMBLE] for k in selected_keys] include_num_runs = [ @@ -1123,26 +1126,31 @@ def predict( selected_keys: list, n_preds: int, index_run: int, - ): + ) -> np.ndarray | None: """Save preditions on ensemble, validation and test data on disc Parameters ---------- - set_: ["valid","test"] - data split name + set_: "valid" | "test" | str + The data split name, returns preds for y_ensemble if not "valid" or "test" + ensemble: EnsembleSelection - trained Ensemble - selected_keys: list - list of selected keys of self.run_info + The trained Ensemble + + selected_keys: list[str] + List of selected keys of self.run_info + n_preds: int - number of prediction models used for ensemble building - same number of predictions on valid and test are necessary + Number of prediction models used for ensemble building same number of + predictions on valid and test are necessary + index_run: int n-th time that ensemble predictions are written to disc Return ------ - y: np.ndarray + np.ndarray | None + Returns the predictions if it can, else None """ self.logger.debug("Predicting the %s set with the ensemble!", set_) @@ -1169,20 +1177,26 @@ def predict( ) return None - def _add_ensemble_trajectory(self, train_pred, valid_pred, test_pred): + def _add_ensemble_trajectory( + self, + train_pred: np.ndarray, + valid_pred: np.ndarray | None, + test_pred: np.ndarray | None, + ) -> None: """ Records a snapshot of how the performance look at a given training time. Parameters ---------- - ensemble: EnsembleSelection - The ensemble selection object to record - valid_pred: np.ndarray + train_pred: np.ndarray + The training predictions + + valid_pred: np.ndarray | None The predictions on the validation set using ensemble - test_pred: np.ndarray - The predictions on the test set using ensemble + test_pred: np.ndarray | None + The predictions on the test set using ensemble """ if self.task_type == BINARY_CLASSIFICATION: if len(train_pred.shape) == 1 or train_pred.shape[1] == 1: @@ -1237,7 +1251,7 @@ def _add_ensemble_trajectory(self, train_pred, valid_pred, test_pred): self.ensemble_history.append(performance_stamp) - def _delete_excess_models(self, selected_keys: list[str]): + def _delete_excess_models(self, selected_keys: list[str]) -> None: """ Deletes models excess models on disc. self.max_models_on_disc defines the upper limit on how many models to keep. 
@@ -1276,8 +1290,7 @@ def _delete_excess_models(self, selected_keys: list[str]): e, ) - def _read_np_fn(self, path): - + def _read_np_fn(self, path: str) -> np.ndarray: # Support for string precision if isinstance(self.precision, str): precision = int(self.precision) From 1fe4c6124d504406dedb710905f2506196aad5eb Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 28 Mar 2022 00:55:05 +0200 Subject: [PATCH 023/117] Simplyify _read_np_fn --- autosklearn/ensemble_building/builder.py | 49 ++++++++++-------------- 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 79459b868c..41582b04ae 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -3,7 +3,6 @@ from typing import Any, Tuple import glob -import gzip import logging.handlers import multiprocessing import numbers @@ -684,7 +683,7 @@ def compute_loss_per_model(self) -> bool: # actually read the predictions and compute their respective loss try: - y_ensemble = self._read_np_fn(y_ens_fn) + y_ensemble = self._predictions_from(y_ens_fn) loss = calculate_loss( solution=self.y_true_ensemble, prediction=y_ensemble, @@ -917,7 +916,7 @@ def get_n_best_preds(self) -> list[str]: k not in self.run_predictions or self.run_predictions[k][Y_ENSEMBLE] is None ) and self.run_info[k]["loaded"] != 3: - self.run_predictions[k][Y_ENSEMBLE] = self._read_np_fn(k) + self.run_predictions[k][Y_ENSEMBLE] = self._predictions_from(k) # No need to load valid and test here because they are loaded # only if the model ends up in the ensemble self.run_info[k]["loaded"] = 1 @@ -1006,7 +1005,7 @@ def get_valid_test_preds( success_keys_valid.append(k) continue try: - y_valid = self._read_np_fn(valid_fn) + y_valid = self._predictions_from(valid_fn) self.run_predictions[k][Y_VALID] = y_valid success_keys_valid.append(k) self.run_info[k]["mtime_valid"] = os.path.getmtime(valid_fn) @@ -1030,7 +1029,7 @@ def get_valid_test_preds( success_keys_test.append(k) continue try: - y_test = self._read_np_fn(test_fn) + y_test = self._predictions_from(test_fn) self.run_predictions[k][Y_TEST] = y_test success_keys_test.append(k) self.run_info[k]["mtime_test"] = os.path.getmtime(test_fn) @@ -1290,27 +1289,21 @@ def _delete_excess_models(self, selected_keys: list[str]) -> None: e, ) - def _read_np_fn(self, path: str) -> np.ndarray: - # Support for string precision - if isinstance(self.precision, str): - precision = int(self.precision) - self.logger.warning("Interpreted str-precision as {}".format(precision)) - else: - precision = self.precision + def _predictions_from(self, path: str | Path) -> np.ndarray: + if isinstance(path, str): + path = Path(path) - if path.endswith("gz"): - open_method = gzip.open - elif path.endswith("npy"): - open_method = open - else: - raise ValueError("Unknown filetype %s" % path) - with open_method(path, "rb") as fp: - if precision == 16: - predictions = np.load(fp, allow_pickle=True).astype(dtype=np.float16) - elif precision == 32: - predictions = np.load(fp, allow_pickle=True).astype(dtype=np.float32) - elif precision == 64: - predictions = np.load(fp, allow_pickle=True).astype(dtype=np.float64) - else: - predictions = np.load(fp, allow_pickle=True) - return predictions + precision = self.precision + + with path.open("rb") as f: + predictions = np.load(f) + + dtypes = { + 16: np.float16, + 32: np.float32, + 64: np.float64, + } + dtype = dtypes.get(precision, predictions.dtype) + predictions = 
predictions.astype(dtype=dtype, copy=False) + + return predictions From facbd7ff1b97bdf9a2b7d0afc9f39b54d01dbf9d Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 28 Mar 2022 01:12:45 +0200 Subject: [PATCH 024/117] Update get_valid_test_preds to use Pathlib --- autosklearn/ensemble_building/builder.py | 147 +++++++---------------- 1 file changed, 43 insertions(+), 104 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 41582b04ae..a8e69f5327 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -945,98 +945,46 @@ def get_valid_test_preds( success_keys_test = [] for k in selected_keys: - valid_fn = glob.glob( - os.path.join( - glob.escape(self.backend.get_runs_directory()), - "%d_%d_%s" - % ( - self.run_info[k]["seed"], - self.run_info[k]["num_run"], - self.run_info[k]["budget"], - ), - "predictions_valid_%d_%d_%s.npy*" - % ( - self.run_info[k]["seed"], - self.run_info[k]["num_run"], - self.run_info[k]["budget"], - ), - ) - ) - valid_fn = [ - vfn - for vfn in valid_fn - if vfn.endswith(".npy") or vfn.endswith(".npy.gz") - ] - test_fn = glob.glob( - os.path.join( - glob.escape(self.backend.get_runs_directory()), - "%d_%d_%s" - % ( - self.run_info[k]["seed"], - self.run_info[k]["num_run"], - self.run_info[k]["budget"], - ), - "predictions_test_%d_%d_%s.npy*" - % ( - self.run_info[k]["seed"], - self.run_info[k]["num_run"], - self.run_info[k]["budget"], - ), - ) - ) - test_fn = [ - tfn - for tfn in test_fn - if tfn.endswith(".npy") or tfn.endswith(".npy.gz") - ] + info = self.run_info[k] + seed, num_run, budget = (info["seed"], info["num_run"], info["budget"]) - if len(valid_fn) == 0: - # self.logger.debug("Not found validation prediction file " - # "(although ensemble predictions available): " - # "%s" % valid_fn) - pass - else: - valid_fn = valid_fn[0] + rundir = Path(self.backend.get_numrun_directory(seed, num_run, budget)) + valid_fn = rundir / f"predictions_valid_{seed}_{num_run}_{budget}.npy" + test_fn = rundir / f"predictions_test_{seed}_{num_run}_{budget}.npy" + + if valid_fn.exists(): if ( - self.run_info[k]["mtime_valid"] == os.path.getmtime(valid_fn) + self.run_info[k]["mtime_valid"] == valid_fn.stat().st_mtime and k in self.run_predictions and self.run_predictions[k][Y_VALID] is not None ): success_keys_valid.append(k) continue - try: - y_valid = self._predictions_from(valid_fn) - self.run_predictions[k][Y_VALID] = y_valid - success_keys_valid.append(k) - self.run_info[k]["mtime_valid"] = os.path.getmtime(valid_fn) - except Exception: - self.logger.warning( - "Error loading %s: %s", valid_fn, traceback.format_exc() - ) - - if len(test_fn) == 0: - # self.logger.debug("Not found test prediction file (although " - # "ensemble predictions available):%s" % - # test_fn) - pass - else: - test_fn = test_fn[0] + else: + try: + y_valid = self._predictions_from(valid_fn) + self.run_predictions[k][Y_VALID] = y_valid + success_keys_valid.append(k) + self.run_info[k]["mtime_valid"] = valid_fn.stat().st_mtime + except Exception: + self.logger.warning(f"Err {valid_fn}:{traceback.format_exc()}") + + if test_fn.exists(): if ( - self.run_info[k]["mtime_test"] == os.path.getmtime(test_fn) + self.run_info[k]["mtime_test"] == test_fn.stat().st_mtime and k in self.run_predictions and self.run_predictions[k][Y_TEST] is not None ): success_keys_test.append(k) - continue - try: - y_test = self._predictions_from(test_fn) - self.run_predictions[k][Y_TEST] = y_test - success_keys_test.append(k) 
- self.run_info[k]["mtime_test"] = os.path.getmtime(test_fn) - except Exception: - self.logger.warning( - "Error loading %s: %s", test_fn, traceback.format_exc() - ) + else: + + try: + y_test = self._predictions_from(test_fn) + self.run_predictions[k][Y_TEST] = y_test + success_keys_test.append(k) + self.run_info[k]["mtime_test"] = os.path.getmtime(test_fn) + except Exception: + self.logger.warning(f"Err {test_fn}:{traceback.format_exc()}") return success_keys_valid, success_keys_test @@ -1064,6 +1012,7 @@ def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: ] # check hash if ensemble training data changed + # TODO could we just use the size, and the last row? current_hash = "".join( [ str(zlib.adler32(predictions_train[i].data.tobytes())) @@ -1073,10 +1022,8 @@ def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: if self.last_hash == current_hash: self.logger.debug( "No new model predictions selected -- skip ensemble building " - "-- current performance: %f", - self.validation_performance_, + f"-- current performance: {self.validation_performance_}", ) - return None self._last_hash = current_hash @@ -1089,34 +1036,26 @@ def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: ) try: - self.logger.debug( - "Fitting the ensemble on %d models.", - len(predictions_train), - ) + self.logger.debug(f"Fitting ensemble on {len(predictions_train)} models") + start_time = time.time() ensemble.fit(predictions_train, self.y_true_ensemble, include_num_runs) - end_time = time.time() - self.logger.debug( - "Fitting the ensemble took %.2f seconds.", - end_time - start_time, - ) + + duration = time.time() - start_time + + self.logger.debug(f"Fitting the ensemble took {duration} seconds.") self.logger.info(ensemble) - self.validation_performance_ = min( - self.validation_performance_, - ensemble.get_validation_performance(), - ) - except ValueError: - self.logger.error("Caught ValueError: %s", traceback.format_exc()) - return None - except IndexError: - self.logger.error("Caught IndexError: %s" + traceback.format_exc()) - return None + ens_perf = ensemble.get_validation_performance() + self.validation_performance_ = min(self.validation_performance_, ens_perf) + + except Exception as e: + self.logger.error(f"Caught error {e}: {traceback.format_exc()}") + ensemble = None finally: # Explicitly free memory del predictions_train - - return ensemble + return ensemble def predict( self, From 9e6169b76d4828b52cf91ffda73782a7c3fce4fd Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 28 Mar 2022 01:20:32 +0200 Subject: [PATCH 025/117] Add intersection to functional --- autosklearn/util/functional.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index 55f38ddf5d..fc511f7866 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -1,7 +1,10 @@ -from typing import Optional +from typing import Optional, Iterable, Set, TypeVar +from functools import reduce import numpy as np +T = TypeVar("T") + def normalize(x: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """Normalizes an array along an axis @@ -52,3 +55,21 @@ def normalize(x: np.ndarray, axis: Optional[int] = None) -> np.ndarray: The normalized array """ return x / x.sum(axis=axis, keepdims=True) + + +def intersection(items: Iterable[Iterable[T]]) -> Set[T]: + """Does an intersection over all collection of items + Parameters + ---------- + items : Iterable[Iterable[T]] + A 
list of lists + Returns + ------- + Set[T] + The intersection of all items + """ + items = list(items) + if len(items) == 0: + return set() + + return set(reduce(lambda s1, s2: set(s1) & set(s2), items, items[0])) From ebb2c78a0e7e85671aef22dcd6e52a45f497f0ad Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 28 Mar 2022 01:30:08 +0200 Subject: [PATCH 026/117] Make functional take *args --- autosklearn/util/functional.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index fc511f7866..5eb59d97a8 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -1,4 +1,6 @@ -from typing import Optional, Iterable, Set, TypeVar +from __future__ import annotations + +from typing import Optional, TypeVar, Iterable from functools import reduce import numpy as np @@ -57,18 +59,26 @@ def normalize(x: np.ndarray, axis: Optional[int] = None) -> np.ndarray: return x / x.sum(axis=axis, keepdims=True) -def intersection(items: Iterable[Iterable[T]]) -> Set[T]: +def intersection(*items: Iterable[T]) -> set[T]: """Does an intersection over all collection of items + + ..code:: python + + ans = intersection(["a", "b", "c"], "ab", ("b", "c")) + + items = [(1, 2, 3), (2, 3), (4, 5)] + ans = intesection(*items) + Parameters ---------- - items : Iterable[Iterable[T]] + *items : Iterable[T] A list of lists + Returns ------- Set[T] The intersection of all items """ - items = list(items) if len(items) == 0: return set() From d0f098041f8d1608983ab992e9c62a4e9441820e Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 28 Mar 2022 01:35:26 +0200 Subject: [PATCH 027/117] Further simplifications --- autosklearn/ensemble_building/builder.py | 53 +++++++++++------------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index a8e69f5327..d342e53baa 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -29,6 +29,7 @@ from autosklearn.util.disk import sizeof from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules +from autosklearn.util.functional import intersection Y_ENSEMBLE = 0 Y_VALID = 1 @@ -467,16 +468,14 @@ def main( selected_keys=candidate_models ) - # If valid/test predictions loaded, then reduce candidate models to this set - if ( - len(n_sel_test) != 0 - and len(n_sel_valid) != 0 - and len(set(n_sel_valid).intersection(set(n_sel_test))) == 0 - ): - # Both n_sel_* have entries, but there is no overlap, this is critical - self.logger.error( - "n_sel_valid and n_sel_test are not empty, but do not overlap" - ) + # Get a set representation of them as we will begin doing intersections + candidates_set = set(candidate_models) + valid_set = set(n_sel_valid) + test_set = set(n_sel_test) + + # Both n_sel_* have entries, but there is no overlap, this is critical + if len(test_set) > 0 and len(valid_set) > 0 and len(valid_set & test_set) == 0: + self.logger.error("n_sel_valid and n_sel_test not empty but do not overlap") if return_predictions: return ( self.ensemble_history, @@ -488,31 +487,29 @@ def main( else: return self.ensemble_history, self.ensemble_nbest, None, None, None + # If valid/test predictions loaded, then reduce candidate models to this set # If any of n_sel_* is not empty and overlaps with candidate_models, # then ensure candidate_models AND n_sel_test are sorted the same - 
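A small usage sketch of the variadic helper defined above, matching its docstring examples; the simplification commit here uses it to intersect the candidate, validation and test key sets:

    from functools import reduce

    # mirrors the reduce-based implementation above
    def intersection(*items):
        if len(items) == 0:
            return set()
        return set(reduce(lambda s1, s2: set(s1) & set(s2), items, items[0]))

    intersection(["a", "b", "c"], "ab", ("b", "c"))   # {'b'}
    intersection((1, 2, 3), (2, 3), (4, 5))           # set()
    intersection()                                    # set()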
candidate_models_set = set(candidate_models) - if candidate_models_set.intersection(n_sel_valid).intersection(n_sel_test): - candidate_models = sorted( - list( - candidate_models_set.intersection(n_sel_valid).intersection( - n_sel_test - ) - ) - ) + candidates_set = set(candidate_models) + valid_set = set(n_sel_valid) + test_set = set(n_sel_test) + + intersect = intersection(candidates_set, valid_set, test_set) + if len(intersect) > 0: + candidate_models = sorted(list(intersect)) n_sel_test = candidate_models n_sel_valid = candidate_models - elif candidate_models_set.intersection(n_sel_valid): - candidate_models = sorted( - list(candidate_models_set.intersection(n_sel_valid)) - ) + + elif len(candidates_set & valid_set) > 0: + candidate_models = sorted(list(candidates_set & valid_set)) n_sel_valid = candidate_models - elif candidate_models_set.intersection(n_sel_test): - candidate_models = sorted( - list(candidate_models_set.intersection(n_sel_test)) - ) + + elif len(candidates_set & n_sel_test) > 0: + candidate_models = sorted(list(candidates_set & test_set)) n_sel_test = candidate_models + + # This has to be the case else: - # This has to be the case n_sel_test = [] n_sel_valid = [] From 9903b747801258e5b5fca5a27051112ebdf9e83b Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 29 Mar 2022 19:31:14 +0200 Subject: [PATCH 028/117] Add a dataclass to represent run information for builder --- autosklearn/ensemble_building/builder.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index d342e53baa..586b955733 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -38,6 +38,26 @@ MODEL_FN_RE = r"_([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*)\.npy" +@dataclass +class RunInfo: + """Dataclass for storing information about a run""" + + seed: int + id: int + budget: float + loss: float = np.inf + mtime_ens: float = 0 + mtime_test: float = 0 + mtime_valid: float = 0 + disk_cost: float | None = None + loaded: int = 0 + # Lazy keys so far: + # 0 - not loaded + # 1 - loaded and in memory + # 2 - loaded but dropped again + # 3 - deleted from disk due to space constraints + + class EnsembleBuilder: model_fn_re = re.compile(MODEL_FN_RE) From 3ff58732a2503eb0afe08a69668220382b79f373 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 29 Mar 2022 19:32:00 +0200 Subject: [PATCH 029/117] Rename to Run --- autosklearn/ensemble_building/builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 586b955733..e7055766ff 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -39,7 +39,7 @@ @dataclass -class RunInfo: +class Run: """Dataclass for storing information about a run""" seed: int From 99399b656340897e8cec1cacfb2ebb24ee6f2cac Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 29 Mar 2022 22:35:55 +0200 Subject: [PATCH 030/117] Change to Run objects --- autosklearn/ensemble_building/builder.py | 266 +++++++++--------- .../test_3_models/test_3_models.py | 18 +- .../test_ensemble_builder.py | 105 +++---- 3 files changed, 189 insertions(+), 200 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index e7055766ff..cd3d65d519 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -13,6 +13,7 @@ 
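For orientation, a minimal sketch of constructing the Run dataclass introduced above; the identifiers are illustrative, and the commit below renames `id` to `num_run` and adds an `ens_file` field:

    run = Run(seed=0, id=2, budget=50.0)
    run.loss     # np.inf by default, filled in once a loss is computed for the run
    run.loaded   # 0: not loaded, 1: loaded and in memory, 2: dropped again, 3: deleted from disk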
import time import traceback import zlib +from dataclasses import dataclass from pathlib import Path import numpy as np @@ -27,9 +28,9 @@ from autosklearn.ensembles.ensemble_selection import EnsembleSelection from autosklearn.metrics import Scorer, calculate_loss, calculate_score from autosklearn.util.disk import sizeof +from autosklearn.util.functional import intersection from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules -from autosklearn.util.functional import intersection Y_ENSEMBLE = 0 Y_VALID = 1 @@ -43,13 +44,14 @@ class Run: """Dataclass for storing information about a run""" seed: int - id: int - budget: float + num_run: int + ens_file: str + budget: float = 0.0 loss: float = np.inf mtime_ens: float = 0 mtime_test: float = 0 mtime_valid: float = 0 - disk_cost: float | None = None + mem_usage: int | None = None loaded: int = 0 # Lazy keys so far: # 0 - not loaded @@ -57,6 +59,22 @@ class Run: # 2 - loaded but dropped again # 3 - deleted from disk due to space constraints + def is_dummy(self) -> bool: + """Whether this run is a dummy run or not""" + return self.num_run == 1 + + def last_modified(self) -> float: + """Query for when the ens file was last modified""" + return self.ens_file.stat().st_mtime + + @property + def id(self) -> tuple[int, int, float]: + """Get the three components of it's id""" + return self.seed, self.num_run, self.budget + + def __str__(self) -> str: + return f"{self.seed}_{self.num_run}_{self.budget}" + class EnsembleBuilder: @@ -206,7 +224,7 @@ def __init__( self._last_hash: str | None = None # The cached info of runs which could come from previous instances - self._run_info: dict[str, dict[str, Any]] | None = None + self._runs: dict[str, Run] | None = None @property def run_predictions_path(self) -> Path: @@ -214,7 +232,7 @@ def run_predictions_path(self) -> Path: return Path(self.backend.internals_directory) / "ensemble_read_preds.pkl" @property - def run_info_path(self) -> Path: + def runs_path(self) -> Path: """Path to the cached losses we store between runs""" return Path(self.backend.internals_directory) / "ensemble_read_losses.pkl" @@ -255,28 +273,16 @@ def last_hash(self) -> str: return self._last_hash @property - def run_info(self) -> dict[str, dict[str, Any]]: - """Get the cached information from previous runs - { - "file name": { - "ens_loss": float - "mtime_ens": str, - "mtime_valid": str, - "mtime_test": str, - "seed": int, - "num_run": int, - } - } - """ - if self._run_info is None: - self._run_info = {} + def runs(self) -> dict[str, Run]: + """Get the cached information from previous runs""" + if self._runs is None: + self._runs = {} - path = self.run_info_path - if path.exists(): - with path.open("rb") as memory: - self._run_info = pickle.load(memory) + if self.runs_path.exists(): + with self.runs_path.open("rb") as memory: + self._runs = pickle.load(memory) - return self._run_info + return self._runs def run( self, @@ -454,7 +460,7 @@ def main( time_left - used_time, ) - # populates self.run_predictions and self.run_info + # populates self.run_predictions and self.runs if not self.compute_loss_per_model(): if return_predictions: return ( @@ -548,8 +554,8 @@ def main( # Save the read losses status for the next iteration, we should do this # before doing predictions as this is a likely place of memory issues - with self.run_info_path.open("wb") as f: - pickle.dump(self.run_info, f) + with self.runs_path.open("wb") as f: + pickle.dump(self.runs, f) if ensemble is not None: 
train_pred = self.predict( @@ -599,13 +605,13 @@ def main( def compute_loss_per_model(self) -> bool: """Compute the loss of the predictions on ensemble building data set; - populates self.run_predictions and self.run_info + populates self.run_predictions and self.runs Side-effects ------------ * Populates - `self.y_ens_files` all the ensemble predictions it could find for runs - - `self.run_info` with the new losses it calculated + - `self.runs` with the new losses it calculated Returns ------- @@ -659,43 +665,37 @@ def compute_loss_per_model(self) -> bool: for y_ens_fn, match, _seed, _num_run, _budget, mtime in sorted( to_read, key=lambda x: x[5] ): + + # Break out if we've read more files than we should if self.read_at_most and n_read_files >= self.read_at_most: - # limit the number of files that will be read - # to limit memory consumption break - if not y_ens_fn.endswith(".npy") and not y_ens_fn.endswith(".npy.gz"): - self.logger.info( - "Error loading file (not .npy or .npy.gz): %s", y_ens_fn - ) + if not y_ens_fn.endswith(".npy"): + self.logger.warning(f"Error loading file (not .npy): {y_ens_fn}") continue - if not self.run_info.get(y_ens_fn): - self.run_info[y_ens_fn] = { - "ens_loss": np.inf, - "mtime_ens": 0, - "mtime_valid": 0, - "mtime_test": 0, - "seed": _seed, - "num_run": _num_run, - "budget": _budget, - "disc_space_cost_mb": None, - # Lazy keys so far: - # 0 - not loaded - # 1 - loaded and in memory - # 2 - loaded but dropped again - # 3 - deleted from disk due to space constraints - "loaded": 0, - } - if not self.run_predictions.get(y_ens_fn): + # Get the run, creating one if it doesn't exist + if y_ens_fn not in self.runs: + run = Run( + seed=_seed, + num_run=_num_run, + budget=_budget, + ens_file=y_ens_fn, + ) + self.runs[y_ens_fn] = run + else: + run = self.runs[y_ens_fn] + + # Put an entry in for the predictions if it doesn't exist + if y_ens_fn not in self.run_predictions: self.run_predictions[y_ens_fn] = { Y_ENSEMBLE: None, Y_VALID: None, Y_TEST: None, } - if self.run_info[y_ens_fn]["mtime_ens"] == mtime: - # same time stamp; nothing changed; + # If the timestamp is the same, nothing's changed so we can move on + if run.mtime_ens == mtime: continue # actually read the predictions and compute their respective loss @@ -709,40 +709,31 @@ def compute_loss_per_model(self) -> bool: scoring_functions=None, ) - if np.isfinite(self.run_info[y_ens_fn]["ens_loss"]): + if np.isfinite(run.loss): self.logger.debug( - "Changing ensemble loss for file %s from %f to %f " - "because file modification time changed? %f - %f", - y_ens_fn, - self.run_info[y_ens_fn]["ens_loss"], - loss, - self.run_info[y_ens_fn]["mtime_ens"], - os.path.getmtime(y_ens_fn), + f"Changing ensemble loss for file {y_ens_fn} from {run.loss} to" + f"{loss} because file modification time changed?" + f"{run.mtime_ens} -> {run.last_modified()}" ) - self.run_info[y_ens_fn]["ens_loss"] = loss + run.loss = loss # It is not needed to create the object here # To save memory, we just compute the loss. 
- self.run_info[y_ens_fn]["mtime_ens"] = os.path.getmtime(y_ens_fn) - self.run_info[y_ens_fn]["loaded"] = 2 - mem_usage = round(sizeof(y_ens_fn, unit="MB"), 2) - self.run_info[y_ens_fn]["disc_space_cost_mb"] = mem_usage + run.mtime_ens = os.path.getmtime(y_ens_fn) + run.loaded = 2 + run.mem_usage = round(sizeof(y_ens_fn, unit="MB"), 2) n_read_files += 1 except Exception: - self.logger.warning( - "Error loading %s: %s", - y_ens_fn, - traceback.format_exc(), - ) - self.run_info[y_ens_fn]["ens_loss"] = np.inf + self.logger.warning(f"Err loading {y_ens_fn}: {traceback.format_exc()}") + run.loss = np.inf + n_files_read = sum([run.loaded > 0 for run in self.runs.values()]) self.logger.debug( - "Done reading %d new prediction files. Loaded %d predictions in " "total.", - n_read_files, - np.sum([pred["loaded"] > 0 for pred in self.run_info.values()]), + f"Done reading {n_read_files} new prediction files." + f"Loaded {n_files_read} predictions in total." ) return True @@ -758,23 +749,19 @@ def get_n_best_preds(self) -> list[str]: ------- list[str] Returns the paths of the selected models which are used as keys in - `run_predictions` and `run_info` + `run_predictions` and `runs` """ - # Sort by loss - smaller is better! - sorted_keys = list( - sorted( - [(k, v["ens_loss"], v["num_run"]) for k, v in self.run_info.items()], - # Sort by loss as priority 1 and then by num_run on a ascending order - # We want small num_run first - key=lambda x: (x[1], x[2]), - ) - ) + # Sort by loss as priority 1 and then by num_run on a ascending order + # We want small id first + keys = [(path, run.loss, run.num_run) for path, run in self.runs.items()] + sorted_keys = sorted(keys, key=lambda x: (x[1], x[2])) # number of models available num_keys = len(sorted_keys) # remove all that are at most as good as random # note: dummy model must have run_id=1 (there is no run_id=0) dummy_losses = list(filter(lambda x: x[2] == 1, sorted_keys)) + # number of dummy models num_dummy = len(dummy_losses) dummy_loss = dummy_losses[0] @@ -800,9 +787,9 @@ def get_n_best_preds(self) -> list[str]: num_dummy, ) sorted_keys = [ - (k, v["ens_loss"], v["num_run"]) - for k, v in self.run_info.items() - if v["seed"] == self.seed and v["num_run"] == 1 + (path, run.loss, run.num_run) + for path, run in self.runs.items() + if run.seed == self.seed and run.is_dummy() ] # reload predictions if losses changed over time and a model is # considered to be in the top models again! 
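The (loss, num_run) sort key above keeps the n-best selection deterministic; a tiny illustration with made-up values:

    runs = {"a": (0.2, 3), "b": (0.1, 7), "c": (0.1, 2)}   # path -> (loss, num_run)
    keys = [(path, loss, num_run) for path, (loss, num_run) in runs.items()]
    sorted(keys, key=lambda x: (x[1], x[2]))
    # [('c', 0.1, 2), ('b', 0.1, 7), ('a', 0.2, 3)]: lower loss first, ties broken by smaller num_run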
@@ -831,12 +818,12 @@ def get_n_best_preds(self) -> list[str]: if self.max_models_on_disc is not None: if not isinstance(self.max_models_on_disc, numbers.Integral): consumption = [ - [ - v["ens_loss"], - v["disc_space_cost_mb"], - ] - for v in self.run_info.values() - if v["disc_space_cost_mb"] is not None + ( + run.loss, + run.mem_usage, + ) + for run in self.runs.values() + if run.mem_usage is not None ] max_consumption = max(c[1] for c in consumption) @@ -913,32 +900,33 @@ def get_n_best_preds(self) -> list[str]: # remove loaded predictions for non-winning models for k in sorted_keys[ensemble_n_best:]: + if k in self.run_predictions: self.run_predictions[k][Y_ENSEMBLE] = None self.run_predictions[k][Y_VALID] = None self.run_predictions[k][Y_TEST] = None - if self.run_info[k]["loaded"] == 1: + + run = self.runs[k] + if run.loaded == 1: self.logger.debug( - "Dropping model %s (%d,%d) with loss %f.", - k, - self.run_info[k]["seed"], - self.run_info[k]["num_run"], - self.run_info[k]["ens_loss"], + f"Dropping model {k} {run.seed}, {run.num_run} with loss {run.loss}" ) - self.run_info[k]["loaded"] = 2 + run.loaded = 2 # Load the predictions for the winning for k in sorted_keys[:ensemble_n_best]: - if ( + + run = self.runs[k] + if run.loaded != 3 and ( k not in self.run_predictions or self.run_predictions[k][Y_ENSEMBLE] is None - ) and self.run_info[k]["loaded"] != 3: + ): + # No need to load valid and test here because they are loaded only if + # the model ends up in the ensemble self.run_predictions[k][Y_ENSEMBLE] = self._predictions_from(k) - # No need to load valid and test here because they are loaded - # only if the model ends up in the ensemble - self.run_info[k]["loaded"] = 1 + run.loaded = 1 - # return keys of self.run_info with lowest losses + # return keys of self.runs with lowest losses return sorted_keys[:ensemble_n_best] def get_valid_test_preds( @@ -962,44 +950,46 @@ def get_valid_test_preds( success_keys_test = [] for k in selected_keys: - info = self.run_info[k] - seed, num_run, budget = (info["seed"], info["num_run"], info["budget"]) + run = self.runs[k] - rundir = Path(self.backend.get_numrun_directory(seed, num_run, budget)) - valid_fn = rundir / f"predictions_valid_{seed}_{num_run}_{budget}.npy" - test_fn = rundir / f"predictions_test_{seed}_{num_run}_{budget}.npy" + rundir = Path(self.backend.get_numrun_directory(*run.id)) + + valid_fn = rundir / f"predictions_valid_{run}.npy" + test_fn = rundir / f"predictions_test_{run}.npy" if valid_fn.exists(): if ( - self.run_info[k]["mtime_valid"] == valid_fn.stat().st_mtime + run.mtime_valid == valid_fn.stat().st_mtime and k in self.run_predictions and self.run_predictions[k][Y_VALID] is not None ): success_keys_valid.append(k) continue + else: try: y_valid = self._predictions_from(valid_fn) self.run_predictions[k][Y_VALID] = y_valid success_keys_valid.append(k) - self.run_info[k]["mtime_valid"] = valid_fn.stat().st_mtime + run.mtime_valid = valid_fn.stat().st_mtime + except Exception: self.logger.warning(f"Err {valid_fn}:{traceback.format_exc()}") if test_fn.exists(): if ( - self.run_info[k]["mtime_test"] == test_fn.stat().st_mtime + run.mtime_test == test_fn.stat().st_mtime and k in self.run_predictions and self.run_predictions[k][Y_TEST] is not None ): success_keys_test.append(k) - else: + else: try: y_test = self._predictions_from(test_fn) self.run_predictions[k][Y_TEST] = y_test success_keys_test.append(k) - self.run_info[k]["mtime_test"] = os.path.getmtime(test_fn) + run.mtime_test = os.path.getmtime(test_fn) except 
Exception: self.logger.warning(f"Err {test_fn}:{traceback.format_exc()}") @@ -1011,7 +1001,7 @@ def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: Parameters ---------- selected_keys: list[str] - List of selected keys of self.run_info + List of selected keys of self.runs Returns ------- @@ -1019,14 +1009,11 @@ def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: The trained ensemble """ predictions_train = [self.run_predictions[k][Y_ENSEMBLE] for k in selected_keys] - include_num_runs = [ - ( - self.run_info[k]["seed"], - self.run_info[k]["num_run"], - self.run_info[k]["budget"], - ) - for k in selected_keys - ] + + selected_runs = [self.runs[k] for k in selected_keys] + + # List of (seed, num_run, budget) + include_num_runs = [run.id for run in selected_runs] # check hash if ensemble training data changed # TODO could we just use the size, and the last row? @@ -1093,7 +1080,7 @@ def predict( The trained Ensemble selected_keys: list[str] - List of selected keys of self.run_info + List of selected keys of self.runs n_preds: int Number of prediction models used for ensemble building same number of @@ -1233,16 +1220,17 @@ def _delete_excess_models(self, selected_keys: list[str]) -> None: try: os.rename(numrun_dir, numrun_dir + ".old") shutil.rmtree(numrun_dir + ".old") - self.logger.info("Deleted files of non-candidate model %s", pred_path) - self.run_info[pred_path]["disc_space_cost_mb"] = None - self.run_info[pred_path]["loaded"] = 3 - self.run_info[pred_path]["ens_loss"] = np.inf + + self.logger.info(f"Deleted files of non-candidate model {pred_path}") + + self.runs[pred_path].disc_space_cost_mb = None + self.runs[pred_path].loaded = 3 + self.runs[pred_path].loss = np.inf + except Exception as e: self.logger.error( - "Failed to delete files of non-candidate model %s due" - " to error %s", - pred_path, - e, + f"Failed to delete files of non-candidate model {pred_path} due" + f" to error {e}", ) def _predictions_from(self, path: str | Path) -> np.ndarray: diff --git a/test/test_ensemble_builder/test_3_models/test_3_models.py b/test/test_ensemble_builder/test_3_models/test_3_models.py index c0d0dfdaa6..7235f6adc2 100644 --- a/test/test_ensemble_builder/test_3_models/test_3_models.py +++ b/test/test_ensemble_builder/test_3_models/test_3_models.py @@ -32,16 +32,16 @@ def test_read(ensemble_backend: Backend) -> None: assert success, f"run_predictions = {str(ensbuilder.run_predictions)}" assert len(ensbuilder.run_predictions) == 3, ensbuilder.run_predictions.keys() - assert len(ensbuilder.run_info) == 3, ensbuilder.run_info.keys() + assert len(ensbuilder.runs) == 3, ensbuilder.runs runsdir = Path(ensemble_backend.get_runs_directory()) preds_1 = runsdir / "0_1_0.0" / "predictions_ensemble_0_1_0.0.npy" preds_2 = runsdir / "0_2_0.0" / "predictions_ensemble_0_2_0.0.npy" preds_3 = runsdir / "0_3_100.0" / "predictions_ensemble_0_3_100.0.npy" - assert ensbuilder.run_info[str(preds_1)]["ens_loss"] == 0.5 - assert ensbuilder.run_info[str(preds_2)]["ens_loss"] == 0.0 - assert ensbuilder.run_info[str(preds_3)]["ens_loss"] == 0.0 + assert ensbuilder.runs[str(preds_1)].loss == 0.5 + assert ensbuilder.runs[str(preds_2)].loss == 0.0 + assert ensbuilder.runs[str(preds_3)].loss == 0.0 @parametrize( @@ -184,7 +184,7 @@ def test_fall_back_nbest(ensemble_backend: Backend) -> None: ensemble_backend.temporary_directory, f".auto-sklearn/runs/{model}/predictions_ensemble_{model}.npy", ) - ensbuilder.run_info[filename]["ens_loss"] = -1 + ensbuilder.runs[filename].loss = -1 sel_keys = 
ensbuilder.get_n_best_preds() @@ -330,7 +330,7 @@ def test_main(ensemble_backend: Backend) -> None: Expects ------- - * There should be "run_predictions" and "run_info" saved to file + * There should be "run_predictions" and "runs" saved to file * There should be 3 model reads * There should be a hash for the preds read in * The true targets should have been read in @@ -358,7 +358,7 @@ def test_main(ensemble_backend: Backend) -> None: internals_dir = Path(ensemble_backend.internals_directory) assert ensbuilder.run_predictions_path.exists(), list(internals_dir.iterdir()) - assert ensbuilder.run_info_path.exists(), list(internals_dir.iterdir()) + assert ensbuilder.runs_path.exists(), list(internals_dir.iterdir()) # There should be three preds read assert len(ensbuilder.run_predictions) == 3 @@ -457,7 +457,7 @@ def mtime_mock(filename: str) -> float: for i, exp_state in enumerate(intermediate_states, start=1): ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - assert ensbuilder.run_info_path.exists() + assert ensbuilder.runs_path.exists() assert not ensbuilder.run_predictions_path.exists() assert mock_logger.warning.call_count == i # type: ignore @@ -469,7 +469,7 @@ def mtime_mock(filename: str) -> float: # change it's internal state ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - assert ensbuilder.run_info_path.exists() + assert ensbuilder.runs_path.exists() assert not ensbuilder.run_predictions_path.exists() assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == final_state diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 89f6deef00..61f8afcced 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -15,6 +15,7 @@ Y_TEST, Y_VALID, EnsembleBuilder, + Run, ) from autosklearn.metrics import roc_auc @@ -95,28 +96,31 @@ def test_max_models_on_disc_with_float_selects_expected_models( """ # These are arranged so the last one is best, with the lose loss - run_info = [ - { - "ens_loss": 10 * -n, - "num_run": n, - "loaded": 1, - "seed": DEFAULT_SEED, - "disc_space_cost_mb": mem_model, - } + runs = [ + Run( + seed=DEFAULT_SEED, + num_run=n, + budget=0.0, + loss=10 * -n, + loaded=1, + mem_usage=mem_model, + ens_file=f"pred{n}", + ) for n in range(1, n_models + 1) ] mem_largest = mem_model * mem_largest_mult if largest_is_best: - run_info[-1]["disc_space_cost_mb"] = mem_largest + runs[-1].mem_usage = mem_largest else: - run_info[0]["disc_space_cost_mb"] = mem_largest + runs[0].mem_usage = mem_largest - nbest = sorted(run_info, key=lambda item: item["ens_loss"])[:n_expected] - mem_for_nbest = sum(item["disc_space_cost_mb"] for item in nbest) + nbest = sorted(runs, key=lambda run: run.loss)[:n_expected] + mem_for_nbest = sum(run.mem_usage for run in nbest) slack = mem_largest # Slack introduced is the size of the largest model - max_models_on_disc = float(mem_for_nbest + slack) + max_models_on_disc = float(mem_for_nbest + slack) # type: ignore + print(max_models_on_disc) ensbuilder = EnsembleBuilder( backend=dummy_backend, @@ -129,16 +133,7 @@ def test_max_models_on_disc_with_float_selects_expected_models( ) # Enter the models, with each model being progressibly better - ensbuilder._run_info = { - f"pred{i}": v for i, v in enumerate(run_info, start=1) - } - - # Make the last model twice as large - if largest_is_best: - ensbuilder.run_info[f"pred{n_models}"]["disc_space_cost_mb"] = mem_largest - else: - 
ensbuilder.run_info["pred1"]["disc_space_cost_mb"] = mem_largest - + ensbuilder._runs = {f"pred{i}": run for i, run in enumerate(runs, start=1)} ensbuilder._run_predictions = { f"pred{n}": {Y_ENSEMBLE: np.array([1])} for n in range(1, n_models + 1) } @@ -153,7 +148,8 @@ def test_max_models_on_disc_with_float_selects_expected_models( @parametrize("n_models", [50, 10, 2, 1]) def test_max_models_on_disc_float_always_preserves_best_model( - n_models: int, dummy_backend: Backend + n_models: int, + dummy_backend: Backend, ) -> None: """ Parameters @@ -183,25 +179,27 @@ def test_max_models_on_disc_float_always_preserves_best_model( memory_limit=None, ) - run_info = { - f"pred{n}": { - "ens_loss": 10 * -n, - "num_run": n + 1, - "loaded": 1, - "seed": DEFAULT_SEED, - "disc_space_cost_mb": 50 * n, - } + runs = [ + Run( + seed=DEFAULT_SEED, + num_run=n + 1, + budget=0.0, + loss=10 * -n, + loaded=1, + mem_usage=50 * n, + ens_file=f"pred{n+1}", + ) for n in range(n_models) - } - best_model = min(run_info, key=lambda m: run_info[m]["ens_loss"]) + ] + best_model = min(runs, key=lambda run: run.loss) - ensbuilder._run_info = run_info + ensbuilder._runs = {run.ens_file: run for run in runs} ensbuilder._run_predictions = { - f"pred{n}": {Y_ENSEMBLE: np.array([1])} for n in range(n_models) + f"pred{n}": {Y_ENSEMBLE: np.array([1])} for n in range(1, n_models + 1) } sel_keys = ensbuilder.get_n_best_preds() - assert [best_model] == sel_keys + assert [best_model.ens_file] == sel_keys @parametrize( @@ -209,7 +207,9 @@ def test_max_models_on_disc_float_always_preserves_best_model( ((0.0, 4), (0.1, 4), (0.3, 3), (0.5, 2), (0.6, 2), (0.8, 1), (1.0, 1)), ) def test_performance_range_threshold( - performance_range_threshold: float, expected_selected: int, dummy_backend: Backend + performance_range_threshold: float, + expected_selected: int, + dummy_backend: Backend, ) -> None: """ Parameters @@ -238,16 +238,17 @@ def test_performance_range_threshold( seed=DEFAULT_SEED, performance_range_threshold=performance_range_threshold, ) - ensbuilder._run_info = { - "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": DEFAULT_SEED}, - "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": DEFAULT_SEED}, - "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": DEFAULT_SEED}, - "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": DEFAULT_SEED}, - "E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": DEFAULT_SEED}, + + ensbuilder._runs = { + "A": Run(seed=DEFAULT_SEED, num_run=1, loss=-1, loaded=-1, ens_file=""), + "B": Run(seed=DEFAULT_SEED, num_run=2, loss=-2, loaded=-1, ens_file=""), + "C": Run(seed=DEFAULT_SEED, num_run=3, loss=-3, loaded=-1, ens_file=""), + "D": Run(seed=DEFAULT_SEED, num_run=4, loss=-4, loaded=-1, ens_file=""), + "E": Run(seed=DEFAULT_SEED, num_run=5, loss=-5, loaded=-1, ens_file=""), } ensbuilder._run_predictions = { name: {preds_key: np.array([1]) for preds_key in (Y_ENSEMBLE, Y_VALID, Y_TEST)} - for name in ensbuilder._run_info + for name in ensbuilder._runs } sel_keys = ensbuilder.get_n_best_preds() @@ -301,16 +302,16 @@ def test_performance_range_threshold_with_ensemble_nbest( performance_range_threshold=performance_range_threshold, max_models_on_disc=None, ) - ensbuilder._run_info = { - "A": {"ens_loss": -1, "num_run": 1, "loaded": -1, "seed": DEFAULT_SEED}, - "B": {"ens_loss": -2, "num_run": 2, "loaded": -1, "seed": DEFAULT_SEED}, - "C": {"ens_loss": -3, "num_run": 3, "loaded": -1, "seed": DEFAULT_SEED}, - "D": {"ens_loss": -4, "num_run": 4, "loaded": -1, "seed": DEFAULT_SEED}, - 
"E": {"ens_loss": -5, "num_run": 5, "loaded": -1, "seed": DEFAULT_SEED}, + ensbuilder._runs = { + "A": Run(seed=DEFAULT_SEED, num_run=1, loss=-1, loaded=-1, ens_file=""), + "B": Run(seed=DEFAULT_SEED, num_run=2, loss=-2, loaded=-1, ens_file=""), + "C": Run(seed=DEFAULT_SEED, num_run=3, loss=-3, loaded=-1, ens_file=""), + "D": Run(seed=DEFAULT_SEED, num_run=4, loss=-4, loaded=-1, ens_file=""), + "E": Run(seed=DEFAULT_SEED, num_run=5, loss=-5, loaded=-1, ens_file=""), } ensbuilder._run_predictions = { name: {pred_name: np.array([1]) for pred_name in (Y_ENSEMBLE, Y_VALID, Y_TEST)} - for name in ensbuilder._run_info + for name in ensbuilder._runs } sel_keys = ensbuilder.get_n_best_preds() From c7c77c0d85e6e116860a92f6d7459e340bb173a1 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 29 Mar 2022 22:36:23 +0200 Subject: [PATCH 031/117] Formatting --- autosklearn/ensemble_building/manager.py | 4 ++-- autosklearn/util/disk.py | 2 +- autosklearn/util/functional.py | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index a062a3a3e2..638b312c54 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -13,10 +13,10 @@ from smac.runhistory.runhistory import RunInfo, RunValue from smac.tae.base import StatusType +from autosklearn.automl_common.common.utils.backend import Backend +from autosklearn.ensemble_building.builder import EnsembleBuilder from autosklearn.metrics import Scorer from autosklearn.util.logging_ import get_named_client_logger -from autosklearn.ensemble_building.builder import EnsembleBuilder -from autosklearn.automl_common.common.utils.backend import Backend class EnsembleBuilderManager(IncorporateRunResultCallback): diff --git a/autosklearn/util/disk.py b/autosklearn/util/disk.py index 0b663c26d3..14a8ee64f7 100644 --- a/autosklearn/util/disk.py +++ b/autosklearn/util/disk.py @@ -1,7 +1,7 @@ from __future__ import annotations -from pathlib import Path import math +from pathlib import Path sizes = { "B": 0, diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index 5eb59d97a8..15d3577f28 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -1,8 +1,9 @@ from __future__ import annotations -from typing import Optional, TypeVar, Iterable +from typing import Iterable, Optional, TypeVar from functools import reduce + import numpy as np T = TypeVar("T") From a7dee5eafb0a21940ed57a2751f3f6f10cac424b Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 29 Mar 2022 23:27:20 +0200 Subject: [PATCH 032/117] Reduce side effects of `compute_loss_per_model` To make testing easier and changes easier, the targets are now passed to the method. This also reduces it's complexity by removing the checking from the method as we can assume the parameters coming in are correct. 
--- autosklearn/ensemble_building/builder.py | 152 +++++++++++------- .../test_3_models/test_3_models.py | 38 ++++- 2 files changed, 120 insertions(+), 70 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index cd3d65d519..8d944a0977 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -174,6 +174,15 @@ def __init__( if max_models_on_disc is not None and max_models_on_disc < 0: raise ValueError("max_models_on_disc must be positive or None") + # Setup the logger + self.logger = get_named_client_logger(name="EnsembleBuilder", port=logger_port) + self.logger_port = logger_port + + # Log the behaviour + if ensemble_nbest == 1: + t = type(ensemble_nbest) + self.logger.debug(f"Using behaviour when {t} for {ensemble_nbest}:{t}") + self.seed = seed self.metric = metric self.backend = backend @@ -194,36 +203,23 @@ def __init__( # The starting time of the procedure self.start_time = 0 - # The cached values of the true targets for the ensemble - self.y_true_ensemble: int | None = None - # Track the ensemble performance self.ensemble_history = [] - # Setup the logger - self.logger = get_named_client_logger(name="EnsembleBuilder", port=logger_port) - self.logger_port = logger_port - # Keep running knowledge of its validation performance self.validation_performance_ = np.inf # Data we may need datamanager = self.backend.load_datamanager() - self.y_valid = datamanager.data.get("Y_valid") - self.y_test = datamanager.data.get("Y_test") + self.y_valid: np.ndarray | None = datamanager.data.get("Y_valid", None) + self.y_test: np.ndarray | None = datamanager.data.get("Y_test", None) + self._y_ensemble: np.ndarray | None = None - # Log the behaviour - if ensemble_nbest == 1: - t = type(ensemble_nbest) - self.logger.debug(f"Using behaviour when {t} for {ensemble_nbest}:{t}") - - # The cached set of run_predictions which could come from previous instances + # Cached items, loaded by properties + # Check the corresponing properties for descriptions + self._run_prediction_paths: list[str] | None = None self._run_predictions: dict[str, dict[int, np.ndarray]] | None = None - - # Hash of the last ensemble training data to identify it self._last_hash: str | None = None - - # The cached info of runs which could come from previous instances self._runs: dict[str, Run] | None = None @property @@ -284,6 +280,49 @@ def runs(self) -> dict[str, Run]: return self._runs + @property + def run_ensemble_prediction_paths(self) -> list[str]: + """Get the all available predictions paths that the ensemble builder can use + + Returns + ------- + list[str] + The list of paths + """ + if self._run_prediction_paths is None: + pred_path = os.path.join( + glob.escape(self.backend.get_runs_directory()), + "%d_*_*" % self.seed, + "predictions_ensemble_%s_*_*.npy*" % self.seed, + ) + y_ens_files = glob.glob(pred_path) + y_ens_files = [ + y_ens_file + for y_ens_file in y_ens_files + if y_ens_file.endswith(".npy") or y_ens_file.endswith(".npy.gz") + ] + self._run_prediction_paths = y_ens_files + + return self._run_prediction_paths + + @property + def y_ensemble(self) -> np.ndarray | None: + """The ensemble targets used for training the ensemble + + It will attempt to load and cache them in memory but + return None if it can't. 
+ + Returns + ------- + np.ndarray | None + The ensemble targets, if they can be loaded + """ + if self._y_ensemble is None: + if os.path.exists(self.backend._get_targets_ensemble_filename()): + self._y_ensemble = self.backend.load_targets_ensemble() + + return self._y_ensemble + def run( self, iteration: int, @@ -460,8 +499,14 @@ def main( time_left - used_time, ) - # populates self.run_predictions and self.runs - if not self.compute_loss_per_model(): + # No predictions found, exit early + if len(self.run_ensemble_prediction_paths) == 0: + self.logger.debug("Found no predictions on ensemble data set") + return self.ensemble_history, self.ensemble_nbest, None, None, None + + # Can't load data, exit early + if not os.path.exists(self.backend._get_targets_ensemble_filename()): + self.logger.debug(f"No targets for ensemble: {traceback.format_exc()}") if return_predictions: return ( self.ensemble_history, @@ -473,6 +518,8 @@ def main( else: return self.ensemble_history, self.ensemble_nbest, None, None, None + self.compute_loss_per_model(targets=self.y_ensemble) + # Only the models with the n_best predictions are candidates # to be in the ensemble candidate_models = self.get_n_best_preds() @@ -603,55 +650,29 @@ def main( else: return self.ensemble_history, self.ensemble_nbest, None, None, None - def compute_loss_per_model(self) -> bool: + def compute_loss_per_model( + self, + targets: np.ndarray, + ) -> None: """Compute the loss of the predictions on ensemble building data set; populates self.run_predictions and self.runs Side-effects ------------ * Populates - - `self.y_ens_files` all the ensemble predictions it could find for runs - `self.runs` with the new losses it calculated - Returns - ------- - bool - Whether it successfully computed losses + Parameters + ---------- + targets: np.ndarray + The targets for which to calculate the losses on. + Typically the ensemble_targts. """ self.logger.debug("Read ensemble data set predictions") - if self.y_true_ensemble is None: - try: - self.y_true_ensemble = self.backend.load_targets_ensemble() - except FileNotFoundError: - self.logger.debug( - "Could not find true targets on ensemble data set: %s", - traceback.format_exc(), - ) - return False - - pred_path = os.path.join( - glob.escape(self.backend.get_runs_directory()), - "%d_*_*" % self.seed, - "predictions_ensemble_%s_*_*.npy*" % self.seed, - ) - y_ens_files = glob.glob(pred_path) - y_ens_files = [ - y_ens_file - for y_ens_file in y_ens_files - if y_ens_file.endswith(".npy") or y_ens_file.endswith(".npy.gz") - ] - self.y_ens_files = y_ens_files - # no validation predictions so far -- no files - if len(self.y_ens_files) == 0: - self.logger.debug( - "Found no prediction files on ensemble data set:" " %s" % pred_path - ) - return False - # First sort files chronologically to_read = [] - for y_ens_fn in self.y_ens_files: + for y_ens_fn in self.run_ensemble_prediction_paths: match = self.model_fn_re.search(y_ens_fn) _seed = int(match.group(1)) _num_run = int(match.group(2)) @@ -702,7 +723,7 @@ def compute_loss_per_model(self) -> bool: try: y_ensemble = self._predictions_from(y_ens_fn) loss = calculate_loss( - solution=self.y_true_ensemble, + solution=targets, prediction=y_ensemble, task_type=self.task_type, metric=self.metric, @@ -735,7 +756,6 @@ def compute_loss_per_model(self) -> bool: f"Done reading {n_read_files} new prediction files." f"Loaded {n_files_read} predictions in total." 
) - return True def get_n_best_preds(self) -> list[str]: """Get best n predictions according to the loss on the "ensemble set" @@ -1043,7 +1063,9 @@ def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: self.logger.debug(f"Fitting ensemble on {len(predictions_train)} models") start_time = time.time() - ensemble.fit(predictions_train, self.y_true_ensemble, include_num_runs) + + # TODO y_ensemble can be None here + ensemble.fit(predictions_train, self.y_ensemble, include_num_runs) duration = time.time() - start_time @@ -1160,10 +1182,11 @@ def _add_ensemble_trajectory( ((1 - test_pred).reshape((1, -1)), test_pred.reshape((1, -1))) ).transpose() + # TODO y_ensemble can be None here performance_stamp = { "Timestamp": pd.Timestamp.now(), "ensemble_optimization_score": calculate_score( - solution=self.y_true_ensemble, + solution=self.y_ensemble, prediction=train_pred, task_type=self.task_type, metric=self.metric, @@ -1199,9 +1222,14 @@ def _delete_excess_models(self, selected_keys: list[str]) -> None: defines the upper limit on how many models to keep. Any additional model with a worst loss than the top self.max_models_on_disc is deleted. + + Parameters + ---------- + selected_keys: list[str] + TODO """ # Loop through the files currently in the directory - for pred_path in self.y_ens_files: + for pred_path in self.run_ensemble_prediction_paths: # Do not delete candidates if pred_path in selected_keys: diff --git a/test/test_ensemble_builder/test_3_models/test_3_models.py b/test/test_ensemble_builder/test_3_models/test_3_models.py index 7235f6adc2..fe20f13b36 100644 --- a/test/test_ensemble_builder/test_3_models/test_3_models.py +++ b/test/test_ensemble_builder/test_3_models/test_3_models.py @@ -28,8 +28,10 @@ def test_read(ensemble_backend: Backend) -> None: seed=DEFAULT_SEED, # important to find the test files ) - success = ensbuilder.compute_loss_per_model() - assert success, f"run_predictions = {str(ensbuilder.run_predictions)}" + targets = ensbuilder.y_ensemble + assert targets is not None + + ensbuilder.compute_loss_per_model(targets) assert len(ensbuilder.run_predictions) == 3, ensbuilder.run_predictions.keys() assert len(ensbuilder.runs) == 3, ensbuilder.runs @@ -94,7 +96,10 @@ def test_nbest( max_models_on_disc=max_models_on_disc, ) - ensbuilder.compute_loss_per_model() + targets = ensbuilder.y_ensemble + assert targets is not None + + ensbuilder.compute_loss_per_model(targets) sel_keys = ensbuilder.get_n_best_preds() assert len(sel_keys) == expected @@ -159,7 +164,11 @@ def test_max_models_on_disc( with patch("autosklearn.ensemble_building.builder.sizeof") as mock: mock.return_value = 500 - ensbuilder.compute_loss_per_model() + targets = ensbuilder.y_ensemble + assert targets is not None + + ensbuilder.compute_loss_per_model(targets) + sel_keys = ensbuilder.get_n_best_preds() assert mock.called print(mock.call_args_list) @@ -177,7 +186,10 @@ def test_fall_back_nbest(ensemble_backend: Backend) -> None: ensemble_nbest=1, ) - ensbuilder.compute_loss_per_model() + targets = ensbuilder.y_ensemble + assert targets is not None + + ensbuilder.compute_loss_per_model(targets) for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"]: filename = os.path.join( @@ -227,7 +239,10 @@ def test_get_valid_test_preds(ensemble_backend: Backend) -> None: for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"] ] - ensbuilder.compute_loss_per_model() + targets = ensbuilder.y_ensemble + assert targets is not None + + ensbuilder.compute_loss_per_model(targets) sel_keys = ensbuilder.get_n_best_preds() assert 
len(sel_keys) == 1 @@ -275,7 +290,10 @@ def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: ensemble_nbest=2, ) - ensbuilder.compute_loss_per_model() + targets = ensbuilder.y_ensemble + assert targets is not None + + ensbuilder.compute_loss_per_model(targets) d2 = os.path.join( ensemble_backend.temporary_directory, @@ -286,6 +304,8 @@ def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: assert len(sel_keys) > 0 ensemble = ensbuilder.fit_ensemble(selected_keys=sel_keys) + + assert ensemble is not None print(ensemble, sel_keys) n_sel_valid, n_sel_test = ensbuilder.get_valid_test_preds(selected_keys=sel_keys) @@ -311,6 +331,8 @@ def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: # predictions for valid and test are the same # --> should results in the same predictions + assert y_valid is not None + assert y_test is not None np.testing.assert_array_almost_equal(y_valid, y_test) # since d2 provides perfect predictions @@ -363,7 +385,7 @@ def test_main(ensemble_backend: Backend) -> None: # There should be three preds read assert len(ensbuilder.run_predictions) == 3 assert ensbuilder.last_hash is not None - assert ensbuilder.y_true_ensemble is not None + assert ensbuilder.y_ensemble is not None # We expect as many iterations as the iters param assert len(run_history) == iters From b6c3e902842291a190403d2f5c45a3ac0a5e2087 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 30 Mar 2022 10:14:56 +0200 Subject: [PATCH 033/117] Change Tuple to tuple --- autosklearn/ensemble_building/builder.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 8d944a0977..f9d51e58e7 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Tuple +from typing import Any import glob import logging.handlers @@ -331,7 +331,7 @@ def run( end_at: float | None = None, time_buffer: int = 5, return_predictions: bool = False, - ) -> Tuple[ + ) -> tuple[ list[dict[str, Any]], int, np.ndarray | None, @@ -457,7 +457,7 @@ def main( time_left: float, iteration: int, return_predictions: bool = False, - ) -> Tuple[ + ) -> tuple[ list[dict[str, Any]], int, np.ndarray | None, @@ -952,7 +952,7 @@ def get_n_best_preds(self) -> list[str]: def get_valid_test_preds( self, selected_keys: list[str], - ) -> Tuple[list[str], list[str]]: + ) -> tuple[list[str], list[str]]: """Get valid and test predictions from disc and store in self.run_predictions Parameters From 45c94e0fbf909bd4c5025e6aa058078a52c234b0 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 30 Mar 2022 18:00:50 +0200 Subject: [PATCH 034/117] Forcibly add data files for tests --- .../.auto-sklearn/predictions_ensemble_true.npy | Bin 0 -> 160 bytes .../runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy | Bin 0 -> 160 bytes .../runs/0_1_0.0/predictions_test_0_1_0.0.npy | Bin 0 -> 160 bytes .../runs/0_1_0.0/predictions_valid_0_1_0.0.npy | Bin 0 -> 160 bytes .../runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy | Bin 0 -> 160 bytes .../runs/0_2_0.0/predictions_test_0_2_0.0.npy | Bin 0 -> 160 bytes .../runs/0_2_0.0/predictions_valid_0_2_0.0.npy | Bin 0 -> 160 bytes .../0_3_100.0/predictions_ensemble_0_3_100.0.npy | Bin 0 -> 160 bytes .../runs/0_3_100.0/predictions_test_0_3_100.0.npy | Bin 0 -> 160 bytes .../0_3_100.0/predictions_valid_0_3_100.0.npy | Bin 0 -> 160 bytes 
.../data/.auto-sklearn/true_targets_ensemble.npy | Bin 0 -> 160 bytes 11 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/predictions_ensemble_true.npy create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_test_0_1_0.0.npy create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_valid_0_1_0.0.npy create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.npy create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_valid_0_2_0.0.npy create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_test_0_3_100.0.npy create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_valid_0_3_100.0.npy create mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/true_targets_ensemble.npy diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/predictions_ensemble_true.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/predictions_ensemble_true.npy new file mode 100644 index 0000000000000000000000000000000000000000..fee3160c86d8995cb5ece8126aae88f13a964629 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy new file mode 100644 index 0000000000000000000000000000000000000000..1b2320113d4ffe309dff0f30b4adb5c434b84d52 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= eXCxM+0{I%IItoUbItsN4aKOa?1&lBTg?s>2!WBvY literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_test_0_1_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_test_0_1_0.0.npy new file mode 100644 index 0000000000000000000000000000000000000000..1b2320113d4ffe309dff0f30b4adb5c434b84d52 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= eXCxM+0{I%IItoUbItsN4aKOa?1&lBTg?s>2!WBvY literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_valid_0_1_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_valid_0_1_0.0.npy new file mode 100644 index 0000000000000000000000000000000000000000..1b2320113d4ffe309dff0f30b4adb5c434b84d52 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= eXCxM+0{I%IItoUbItsN4aKOa?1&lBTg?s>2!WBvY literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy 
b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy new file mode 100644 index 0000000000000000000000000000000000000000..fee3160c86d8995cb5ece8126aae88f13a964629 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.npy new file mode 100644 index 0000000000000000000000000000000000000000..fee3160c86d8995cb5ece8126aae88f13a964629 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_valid_0_2_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_valid_0_2_0.0.npy new file mode 100644 index 0000000000000000000000000000000000000000..fee3160c86d8995cb5ece8126aae88f13a964629 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy new file mode 100644 index 0000000000000000000000000000000000000000..fee3160c86d8995cb5ece8126aae88f13a964629 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_test_0_3_100.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_test_0_3_100.0.npy new file mode 100644 index 0000000000000000000000000000000000000000..fee3160c86d8995cb5ece8126aae88f13a964629 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_valid_0_3_100.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_valid_0_3_100.0.npy new file mode 100644 index 0000000000000000000000000000000000000000..fee3160c86d8995cb5ece8126aae88f13a964629 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g literal 0 HcmV?d00001 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/true_targets_ensemble.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/true_targets_ensemble.npy new file mode 100644 index 0000000000000000000000000000000000000000..fee3160c86d8995cb5ece8126aae88f13a964629 GIT binary patch literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g literal 0 HcmV?d00001 From 9dee1e81e31184f03010f9efc6f56ee05dd68ac3 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 12:08:26 +0200 Subject: [PATCH 035/117] Fix: Can now load pickled numpy arrays w/ test --- 
autosklearn/ensemble_building/builder.py | 4 +- .../test_ensemble_builder.py | 38 +++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index f9d51e58e7..29b21a78b1 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -1268,7 +1268,9 @@ def _predictions_from(self, path: str | Path) -> np.ndarray: precision = self.precision with path.open("rb") as f: - predictions = np.load(f) + # TODO: We should probably remove this requirement. I'm not sure why model + # predictions are being saved as pickled + predictions = np.load(f, allow_pickle=True) dtypes = { 16: np.float16, 32: np.float32, 64: np.float64, } diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 61f8afcced..fd873f6ae4 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -358,3 +358,41 @@ def test_run_end_at(dummy_backend: Backend, time_buffer: int, duration: int) -> # The 1 comes from the small overhead in conjunction with rounding down expected = duration - time_buffer - 1 assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"] == expected + + +def test_can_load_pickled_ndarray_of_dtype_object(dummy_backend: Backend) -> None: + """ + Fixture + ------- + dummy_backend: Backend + A backend with a datamanager so it will load + + Expects + ------- + * EnsembleBuilder should be able to load np.ndarray's that were saved as a pickled + object, which happens when the np.ndarray's are of dtype object. + + """ + # TODO Should probably remove this test + # + # I'm not sure why the predictions are stored as pickled objects sometimes + # but that's a security vulnerability to users using auto-sklearn.
+ # + ensbuilder = EnsembleBuilder( + backend=dummy_backend, + dataset_name="TEST", + task_type=BINARY_CLASSIFICATION, + metric=roc_auc, + ) + + # By specifying dtype object, we force it into saving as a pickle + x = np.array([1, 2, 3, 4], dtype=object) + + path = Path(dummy_backend.internals_directory) / "test.npy" + with path.open("wb") as f: + # This is the default value (allow_pickle=True) but we explicitly state it + np.save(f, x, allow_pickle=True) + + loaded_x = ensbuilder._predictions_from(path) + + np.testing.assert_equal(x, loaded_x) From f403c8da909ce3928a594a1c3e929bc464f07d08 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 14:16:04 +0200 Subject: [PATCH 036/117] Add test for checking ensemble builder output --- .../test_ensemble_builder_real.py | 48 ++++++++++++++++--- 1 file changed, 41 insertions(+), 7 deletions(-) diff --git a/test/test_ensemble_builder/test_ensemble_builder_real.py b/test/test_ensemble_builder/test_ensemble_builder_real.py index 94ba79c7ff..ad8ce3f0bb 100644 --- a/test/test_ensemble_builder/test_ensemble_builder_real.py +++ b/test/test_ensemble_builder/test_ensemble_builder_real.py @@ -1,10 +1,13 @@ -from typing import Callable +from __future__ import annotations + +from typing import Any, Callable from pathlib import Path from shutil import rmtree from autosklearn.automl import AutoML from autosklearn.automl_common.common.utils.backend import Backend +from autosklearn.ensemble_building.builder import EnsembleBuilder from pytest_cases import parametrize_with_cases @@ -12,11 +15,11 @@ @parametrize_with_cases("automl", cases=cases, has_tag="fitted") -def case_automl_cases( +def case_ensemble_builder_with_real_runs( tmp_path: Path, automl: AutoML, make_backend: Callable[..., Backend], -) -> Backend: +) -> tuple[Backend, dict[str, Any]]: """Gives the backend for from the cached automl instance in `test_automl/cases.py` We do this by copying the backend produced from these cached automl runs to a new @@ -29,6 +32,7 @@ backend_path = tmp_path / "backend" backend = make_backend(path=backend_path, template=original_backend) + assert backend.internals_directory != original_backend.internals_directory ensemble_dir = Path(backend.get_ensemble_dir()) if ensemble_dir.exists(): @@ -38,9 +42,39 @@ if ensemble_hist.exists(): ensemble_hist.unlink() - return backend + # This is extra information required to build the ensemble builder exactly + # as was created by the AutoML object + builder = EnsembleBuilder( + backend=backend, + dataset_name=automl._dataset_name, # type: ignore is not None + task_type=automl._task, # type: ignore is not None + metric=automl._metric, # type: ignore is not None + seed=automl._seed, + max_models_on_disc=automl._max_models_on_disc, + memory_limit=automl._memory_limit, + ) + return builder + + +@parametrize_with_cases("builder", cases=case_ensemble_builder_with_real_runs) +def test_outputs(builder: EnsembleBuilder) -> None: + """ + Fixtures + -------- + builder: EnsembleBuilder + An EnsembleBuilder created from the contents of a real autosklearn AutoML run + + Expects + ------- + * Should generate cached items "ensemble_read_preds" and "ensemble_read_losses" + * Should generate an ensembles directory which contains at least one ensemble + """ + builder.main(time_left=10, iteration=0) + + for path in [builder.run_predictions_path, builder.runs_path]: + assert path.exists(), f"contents = {list(path.parent.iterdir())}" + + ens_dir = Path(builder.backend.get_ensemble_dir())
-@parametrize_with_cases("ensemble_backend", cases=case_automl_cases) -def test_something(ensemble_backend: Backend) -> None: - return + assert ens_dir.exists() + assert len(list(ens_dir.iterdir())) > 0 From 39f7f8331e3593db2aa4d1e42190c72b525de450 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 14:16:20 +0200 Subject: [PATCH 037/117] Fix bug with using list instead of set --- autosklearn/ensemble_building/builder.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 29b21a78b1..969ff63486 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -560,13 +560,6 @@ def main( else: return self.ensemble_history, self.ensemble_nbest, None, None, None - # If valid/test predictions loaded, then reduce candidate models to this set - # If any of n_sel_* is not empty and overlaps with candidate_models, - # then ensure candidate_models AND n_sel_test are sorted the same - candidates_set = set(candidate_models) - valid_set = set(n_sel_valid) - test_set = set(n_sel_test) - intersect = intersection(candidates_set, valid_set, test_set) if len(intersect) > 0: candidate_models = sorted(list(intersect)) @@ -577,7 +570,7 @@ def main( candidate_models = sorted(list(candidates_set & valid_set)) n_sel_valid = candidate_models - elif len(candidates_set & n_sel_test) > 0: + elif len(candidates_set & test_set) > 0: candidate_models = sorted(list(candidates_set & test_set)) n_sel_test = candidate_models From b39db86e62f7b63a52424db56a3465a32ae89d93 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 14:26:23 +0200 Subject: [PATCH 038/117] Making deubgging message a little clearer --- autosklearn/ensemble_building/builder.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 969ff63486..9c273e6b8e 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -821,10 +821,7 @@ def get_n_best_preds(self) -> list[str]: else: # Keep only at most ensemble_nbest keep_nbest = min(self.ensemble_nbest, len(sorted_keys)) - self.logger.debug( - "Library Pruning: using for ensemble only " - " %d (out of %d) models" % (keep_nbest, len(sorted_keys)) - ) + self.logger.debug(f"Using {keep_nbest} of total {len(sorted_keys)} models") # If max_models_on_disc is None, do nothing # One can only read at most max_models_on_disc models From 40e64b02e438415621b38beb5c226fbf45ededdc Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 14:35:27 +0200 Subject: [PATCH 039/117] Fix typing and case name --- test/fixtures/backend.py | 10 ++++++---- test/test_automl/cases.py | 2 +- .../test_ensemble_builder_real.py | 4 ++-- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/test/fixtures/backend.py b/test/fixtures/backend.py index 393bacde28..59a1d99f0f 100644 --- a/test/fixtures/backend.py +++ b/test/fixtures/backend.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Callable, Optional, Union +from typing import Callable import os from distutils.dir_util import copy_tree @@ -50,8 +50,8 @@ def make_backend() -> Callable[..., Backend]: """ # TODO redo once things use paths def _make( - path: Union[str, Path], - template: Optional[Path | Backend] = None, + path: str | Path, + template: Path | Backend | None = None, ) -> Backend: _path = Path(path) if not 
isinstance(path, Path) else path assert not _path.exists(), "Try passing path / 'backend'" @@ -63,13 +63,15 @@ def _make( ) if template is not None: + dest = Path(backend.temporary_directory) + if isinstance(template, Backend): template = Path(template.temporary_directory) if isinstance(template, Path): assert template.exists() - dest = Path(backend.temporary_directory) copy_tree(str(template), str(dest)) + else: raise NotImplementedError(template) diff --git a/test/test_automl/cases.py b/test/test_automl/cases.py index 74a22a20a8..e18da7f3e8 100644 --- a/test/test_automl/cases.py +++ b/test/test_automl/cases.py @@ -56,7 +56,7 @@ def case_regressor( # ################################### @case(tags=["classifier", "fitted", "holdout", "cached"]) @parametrize("dataset", ["iris"]) -def case_classifier_fitted_holdout( +def case_classifier_fitted_holdout_iterative( automl_cache: Callable[[str], AutoMLCache], dataset: str, make_automl_classifier: Callable[..., AutoMLClassifier], diff --git a/test/test_ensemble_builder/test_ensemble_builder_real.py b/test/test_ensemble_builder/test_ensemble_builder_real.py index ad8ce3f0bb..6788a22b12 100644 --- a/test/test_ensemble_builder/test_ensemble_builder_real.py +++ b/test/test_ensemble_builder/test_ensemble_builder_real.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Callable +from typing import Callable from pathlib import Path from shutil import rmtree @@ -19,7 +19,7 @@ def case_ensemble_builder_with_real_runs( tmp_path: Path, automl: AutoML, make_backend: Callable[..., Backend], -) -> tuple[Backend, dict[str, Any]]: +) -> EnsembleBuilder: """Gives the backend for from the cached automl instance in `test_automl/cases.py` We do this by copying the backend produced from these cached automl runs to a new From 35f92c2f65e6fb287710e86f362db3d606f001f0 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 14:36:30 +0200 Subject: [PATCH 040/117] Rename test file to reflect what it tests --- ...est_ensemble_builder.py => test_ensemble_builder_mock_data.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename test/test_ensemble_builder/{test_ensemble_builder.py => test_ensemble_builder_mock_data.py} (100%) diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder_mock_data.py similarity index 100% rename from test/test_ensemble_builder/test_ensemble_builder.py rename to test/test_ensemble_builder/test_ensemble_builder_mock_data.py From fe24b8c4ed335e918c7316d46d6fc82924b0fbaf Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 14:40:28 +0200 Subject: [PATCH 041/117] Make pynisher context optional --- autosklearn/ensemble_building/builder.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 9c273e6b8e..02d11713b0 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -326,7 +326,7 @@ def y_ensemble(self) -> np.ndarray | None: def run( self, iteration: int, - pynisher_context: str, + pynisher_context: str | None = None, time_left: float | None = None, end_at: float | None = None, time_buffer: int = 5, @@ -345,8 +345,9 @@ def run( iteration : int What iteration to associate with this run - pynisher_context : str - The pynisher context to run in + pynisher_context : str | None = None + The pynisher context to run in. 
If None, defaults to + multiprocessing.get_context(None) time_left : float | None = None How much time should be left for this run. Either this or `end_at` must @@ -394,6 +395,7 @@ def run( wall_time_in_s = int(time_left - time_buffer) if wall_time_in_s < 1: break + context = multiprocessing.get_context(pynisher_context) preload_modules(context) From c2b19e1f10d7424025ff7c4e82ebcf79162add9d Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 17:01:27 +0200 Subject: [PATCH 042/117] Fix loaded models test --- test/test_automl/test_post_fit.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/test/test_automl/test_post_fit.py b/test/test_automl/test_post_fit.py index 674a452d02..7cc6dafb6e 100644 --- a/test/test_automl/test_post_fit.py +++ b/test/test_automl/test_post_fit.py @@ -20,7 +20,10 @@ def test_holdout_loaded_models(automl: AutoML) -> None: * The cv_models_ attr should remain None """ assert automl.ensemble_ is not None - assert set(automl.models_.keys()) == set(automl.ensemble_.identifiers_) + + ensemble_identifiers = automl.ensemble_.get_selected_model_identifiers() + + assert set(automl.models_.keys()) == set(ensemble_identifiers) assert automl.cv_models_ is None @@ -39,8 +42,11 @@ def test_cv_loaded_models(automl: AutoML) -> None: * The cv_models_ should contain the identifiers of what's in the ensemble """ assert automl.ensemble_ is not None - assert set(automl.models_.keys()) == set(automl.ensemble_.identifiers_) - assert set(automl.cv_models_.keys()) == set(automl.ensemble_.identifiers_) + + ensemble_identifiers = automl.ensemble_.get_selected_model_identifiers() + + assert set(automl.models_.keys()) == set(ensemble_identifiers) + assert set(automl.cv_models_.keys()) == set(ensemble_identifiers) @parametrize_with_cases("automl", cases=cases, has_tag=["fitted", "no_ensemble"]) From c644502c179b787405217f794069e09ce2f0caa2 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 17:22:48 +0200 Subject: [PATCH 043/117] Updates to Run dataclass --- autosklearn/ensemble_building/builder.py | 162 +++++++++++++---------- 1 file changed, 91 insertions(+), 71 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 02d11713b0..ad22aef139 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -19,6 +19,7 @@ import numpy as np import pandas as pd import pynisher +from typing_extensions import Literal from autosklearn.automl_common.common.ensemble_building.abstract_ensemble import ( # noqa: E501 AbstractEnsemble, @@ -36,8 +37,6 @@ Y_VALID = 1 Y_TEST = 2 -MODEL_FN_RE = r"_([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*)\.npy" - @dataclass class Run: @@ -46,26 +45,53 @@ class Run: seed: int num_run: int ens_file: str + dir: Path budget: float = 0.0 loss: float = np.inf - mtime_ens: float = 0 - mtime_test: float = 0 - mtime_valid: float = 0 - mem_usage: int | None = None - loaded: int = 0 + _mem_usage: int | None = None + # The recorded time of ensemble/test/valid predictions modified + recorded_mtime_ensemble: float = 0 + recorded_mtime_test: float = 0 + recorded_mtime_valid: float = 0 # Lazy keys so far: # 0 - not loaded # 1 - loaded and in memory # 2 - loaded but dropped again # 3 - deleted from disk due to space constraints + loaded: int = 0 def is_dummy(self) -> bool: """Whether this run is a dummy run or not""" return self.num_run == 1 - def last_modified(self) -> float: + def was_modified(self, kind: Literal["ensemble", "valid", "test"]) 
-> bool: """Query for when the ens file was last modified""" - return self.ens_file.stat().st_mtime + # I didn't like the idea of putting this into a dict, feel free to change + if kind == "ensemble": + mtime = self.recorded_mtime_ensemble + elif kind == "valid": + mtime = self.recorded_mtime_valid + elif kind == "test": + mtime == self.recorded_mtime_test + else: + raise NotImplementedError() + + if mtime == 0: + raise ValueError(f"Run has no recorded time for {kind}: {self}") + + return self.pred_path(kind).stat().st_mtime == mtime + + def pred_path(self, kind: Literal["ensemble", "valid", "test"]) -> Path: + """Get the path to certain predictions""" + fname = f"predictions_{kind}_{self.seed}_{self.num_run}_{self.budget}.npy" + return self.dir / fname + + @property + def mem_usage(self) -> float: + if self._mem_usage is None: + self._mem_usage = round(sizeof(self.dir, unit="MB"), 2) + + return self._mem_usage @property def id(self) -> tuple[int, int, float]: @@ -75,10 +101,26 @@ def id(self) -> tuple[int, int, float]: def __str__(self) -> str: return f"{self.seed}_{self.num_run}_{self.budget}" + @staticmethod + def from_dir(dir: Path) -> Run: + """Creates a Run from a path point to the directory of a run -class EnsembleBuilder: + Parameters + ---------- + dir: Path + Expects something like /path/to/{seed}_{numrun}_budget + + Returns + ------- + Run + The run object generated from the directory + """ + name = path.name + seed, num_run, budget = name.split('_') + return Run(seed=seed, num_run=num_run, budget=budget, dir=dir) - model_fn_re = re.compile(MODEL_FN_RE) + +class EnsembleBuilder: def __init__( self, @@ -274,22 +316,9 @@ def runs(self) -> dict[str, Run]: if self._runs is None: self._runs = {} - if self.runs_path.exists(): - with self.runs_path.open("rb") as memory: - self._runs = pickle.load(memory) - - return self._runs - - @property - def run_ensemble_prediction_paths(self) -> list[str]: - """Get the all available predictions paths that the ensemble builder can use - - Returns - ------- - list[str] - The list of paths - """ - if self._run_prediction_paths is None: + # First read in all the runs on disk + rundir = Path(self.backend.get_runs_directory()) + runs_dirs = list(rundir.iterdir()) pred_path = os.path.join( glob.escape(self.backend.get_runs_directory()), "%d_*_*" % self.seed, @@ -305,6 +334,14 @@ def run_ensemble_prediction_paths(self) -> list[str]: return self._run_prediction_paths + + # Next, get the info about runs from last read + if self.runs_path.exists(): + with self.runs_path.open("rb") as memory: + previous_info = pickle.load(memory) + + return self._runs + @property def y_ensemble(self) -> np.ndarray | None: """The ensemble targets used for training the ensemble @@ -495,11 +532,8 @@ def main( train_pred, valid_pred, test_pred = None, None, None used_time = time.time() - self.start_time - self.logger.debug( - "Starting iteration %d, time left: %f", - iteration, - time_left - used_time, - ) + left_for_iter = time_left - used_time + self.logger.debug(f"Starting iteration {iteration}, time left: {left_for_iter}") # No predictions found, exit early if len(self.run_ensemble_prediction_paths) == 0: @@ -509,16 +543,7 @@ def main( # Can't load data, exit early if not os.path.exists(self.backend._get_targets_ensemble_filename()): self.logger.debug(f"No targets for ensemble: {traceback.format_exc()}") - if return_predictions: - return ( - self.ensemble_history, - self.ensemble_nbest, - train_pred, - valid_pred, - test_pred, - ) - else: - return self.ensemble_history, 
self.ensemble_nbest, None, None, None + return self.ensemble_history, self.ensemble_nbest, None, None, None self.compute_loss_per_model(targets=self.y_ensemble) @@ -645,10 +670,7 @@ def main( else: return self.ensemble_history, self.ensemble_nbest, None, None, None - def compute_loss_per_model( - self, - targets: np.ndarray, - ) -> None: + def compute_loss_per_model(self, targets: np.ndarray) -> None: """Compute the loss of the predictions on ensemble building data set; populates self.run_predictions and self.runs @@ -667,44 +689,44 @@ def compute_loss_per_model( # First sort files chronologically to_read = [] - for y_ens_fn in self.run_ensemble_prediction_paths: - match = self.model_fn_re.search(y_ens_fn) + for pred_path in self.run_ensemble_prediction_paths: + match = self.model_fn_re.search(pred_path) _seed = int(match.group(1)) _num_run = int(match.group(2)) _budget = float(match.group(3)) - mtime = os.path.getmtime(y_ens_fn) + mtime = os.path.getmtime(pred_path) - to_read.append([y_ens_fn, match, _seed, _num_run, _budget, mtime]) + to_read.append([pred_path, match, _seed, _num_run, _budget, mtime]) n_read_files = 0 # Now read file wrt to num_run - for y_ens_fn, match, _seed, _num_run, _budget, mtime in sorted( + for pred_path, match, _seed, _num_run, _budget, mtime in sorted( to_read, key=lambda x: x[5] ): # Break out if we've read more files than we should - if self.read_at_most and n_read_files >= self.read_at_most: + if self.read_at_most is not None and n_read_files >= self.read_at_most: break - if not y_ens_fn.endswith(".npy"): - self.logger.warning(f"Error loading file (not .npy): {y_ens_fn}") + if not pred_path.endswith(".npy"): + self.logger.warning(f"Error loading file (not .npy): {pred_path}") continue # Get the run, creating one if it doesn't exist - if y_ens_fn not in self.runs: + if pred_path not in self.runs: run = Run( seed=_seed, num_run=_num_run, budget=_budget, - ens_file=y_ens_fn, + ens_file=pred_path, ) - self.runs[y_ens_fn] = run + self.runs[pred_path] = run else: - run = self.runs[y_ens_fn] + run = self.runs[pred_path] # Put an entry in for the predictions if it doesn't exist - if y_ens_fn not in self.run_predictions: - self.run_predictions[y_ens_fn] = { + if pred_path not in self.run_predictions: + self.run_predictions[pred_path] = { Y_ENSEMBLE: None, Y_VALID: None, Y_TEST: None, @@ -716,7 +738,7 @@ def compute_loss_per_model( # actually read the predictions and compute their respective loss try: - y_ensemble = self._predictions_from(y_ens_fn) + y_ensemble = self._predictions_from(pred_path) loss = calculate_loss( solution=targets, prediction=y_ensemble, @@ -727,8 +749,8 @@ def compute_loss_per_model( if np.isfinite(run.loss): self.logger.debug( - f"Changing ensemble loss for file {y_ens_fn} from {run.loss} to" - f"{loss} because file modification time changed?" + f"Changing ensemble loss for file {pred_path} from {run.loss}" + f" to {loss} because file modification time changed?" f"{run.mtime_ens} -> {run.last_modified()}" ) @@ -736,14 +758,16 @@ def compute_loss_per_model( # It is not needed to create the object here # To save memory, we just compute the loss. 
- run.mtime_ens = os.path.getmtime(y_ens_fn) + run.mtime_ens = os.path.getmtime(pred_path) run.loaded = 2 - run.mem_usage = round(sizeof(y_ens_fn, unit="MB"), 2) + run.mem_usage = run.mem_usage() n_read_files += 1 except Exception: - self.logger.warning(f"Err loading {y_ens_fn}: {traceback.format_exc()}") + self.logger.warning( + f"Err loading {pred_path}: {traceback.format_exc()}" + ) run.loss = np.inf n_files_read = sum([run.loaded > 0 for run in self.runs.values()]) @@ -1264,11 +1288,7 @@ def _predictions_from(self, path: str | Path) -> np.ndarray: # predictions are being saved as pickled predictions = np.load(f, allow_pickle=True) - dtypes = { - 16: np.float16, - 32: np.float32, - 64: np.float64, - } + dtypes = {16: np.float16, 32: np.float32, 64: np.float64} dtype = dtypes.get(precision, predictions.dtype) predictions = predictions.astype(dtype=dtype, copy=False) From 0d459ef8b01780bbeef6c06cef16a0202b4dd5f1 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 17:28:54 +0200 Subject: [PATCH 044/117] Add method to `Run` to allow recording of last modified --- autosklearn/ensemble_building/builder.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index ad22aef139..40773162ba 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -60,6 +60,11 @@ class Run: # 3 - deleted from disk due to space constraints loaded: int = 0 + @property + def mem_usage(self) -> float: + if self._mem_usage is None: + self._mem_usage = round(sizeof(self.dir, unit="MB"), 2) + def is_dummy(self) -> bool: """Whether this run is a dummy run or not""" return self.num_run == 1 @@ -86,10 +91,11 @@ def pred_path(self, kind: Literal["ensemble", "valid", "test"]) -> Path: fname = f"predictions_{kind}_{self.seed}_{self.num_run}_{self.budget}.npy" return self.dir / fname - @property - def mem_usage(self) -> float: - if self._mem_usage is None: - self._mem_usage = round(sizeof(self.dir, unit="MB"), 2) + def record_modified_times(self) -> None: + self.recorded_mtime_ensemble = self.pred_path("ensemble").stat().st_mtime + self.recorded_mtime_valid = self.pred_path("valid").stat().st_mtime + self.recorded_mtime_test = self.pred_path("test").stat().st_mtime + return self._mem_usage From 292394690d214a2cb2f4e9d1514dc732d2d0fe5f Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 17:36:03 +0200 Subject: [PATCH 045/117] Change Run mtimes to dictionary --- autosklearn/ensemble_building/builder.py | 73 +++++++++++------------- 1 file changed, 32 insertions(+), 41 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 40773162ba..0571f49058 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -38,6 +38,9 @@ Y_TEST = 2 +RunID = tuple[int, int, float] + + @dataclass class Run: """Dataclass for storing information about a run""" @@ -50,9 +53,7 @@ class Run: loss: float = np.inf _mem_usage: int | None = None # The recorded time of ensemble/test/valid predictions modified - recorded_mtime_ensemble: float = 0 - recorded_mtime_test: float = 0 - recorded_mtime_valid: float = 0 + recorded_mtimes: dict[str, float] = 0 # Lazy keys so far: # 0 - not loaded # 1 - loaded and in memory @@ -62,29 +63,28 @@ class Run: @property def mem_usage(self) -> float: + """The memory usage of this run based on it's directory""" if self._mem_usage is None: 
self._mem_usage = round(sizeof(self.dir, unit="MB"), 2) + return self._mem_usage + def is_dummy(self) -> bool: """Whether this run is a dummy run or not""" return self.num_run == 1 def was_modified(self, kind: Literal["ensemble", "valid", "test"]) -> bool: """Query for when the ens file was last modified""" - # I didn't like the idea of putting this into a dict, feel free to change - if kind == "ensemble": - mtime = self.recorded_mtime_ensemble - elif kind == "valid": - mtime = self.recorded_mtime_valid - elif kind == "test": - mtime == self.recorded_mtime_test - else: - raise NotImplementedError() + if self.recorded_mtimes is None: + raise RuntimeError("No times were recorded, use `record_modified_times`") - if mtime == 0: + if kind not in self.recorded_mtimes: raise ValueError(f"Run has no recorded time for {kind}: {self}") - return self.pred_path(kind).stat().st_mtime == mtime + recorded = self.recorded_mtimes[kind] + last = self.pred_path(kind).stat().st_mtime + + return recorded == last def pred_path(self, kind: Literal["ensemble", "valid", "test"]) -> Path: """Get the path to certain predictions""" @@ -92,15 +92,14 @@ def pred_path(self, kind: Literal["ensemble", "valid", "test"]) -> Path: return self.dir / fname def record_modified_times(self) -> None: - self.recorded_mtime_ensemble = self.pred_path("ensemble").stat().st_mtime - self.recorded_mtime_valid = self.pred_path("valid").stat().st_mtime - self.recorded_mtime_test = self.pred_path("test").stat().st_mtime - - - return self._mem_usage + """Records the last time each prediction file type was modified, if it exists""" + for kind in ["ensemble", "valid", "test"]: + path = self.pred_path(kind) + if path.exists(): + self.recorded_mtimes[kind] = path.stat().st_mtime() @property - def id(self) -> tuple[int, int, float]: + def id(self) -> RunID: """Get the three components of it's id""" return self.seed, self.num_run, self.budget @@ -121,7 +120,7 @@ def from_dir(dir: Path) -> Run: Run The run object generated from the directory """ - name = path.name + name = dir.name seed, num_run, budget = name.split('_') return Run(seed=seed, num_run=num_run, budget=budget, dir=dir) @@ -323,28 +322,20 @@ def runs(self) -> dict[str, Run]: self._runs = {} # First read in all the runs on disk - rundir = Path(self.backend.get_runs_directory()) - runs_dirs = list(rundir.iterdir()) - pred_path = os.path.join( - glob.escape(self.backend.get_runs_directory()), - "%d_*_*" % self.seed, - "predictions_ensemble_%s_*_*.npy*" % self.seed, - ) - y_ens_files = glob.glob(pred_path) - y_ens_files = [ - y_ens_file - for y_ens_file in y_ens_files - if y_ens_file.endswith(".npy") or y_ens_file.endswith(".npy.gz") - ] - self._run_prediction_paths = y_ens_files - - return self._run_prediction_paths + runs_dir = Path(self.backend.get_runs_directory()) + all_runs = [Run.from_dir(dir) for dir in runs_dir.iterdir()] - - # Next, get the info about runs from last read + # Next, get the info about runs from last read, if any + loaded_runs: dict[str, Run] = {} if self.runs_path.exists(): with self.runs_path.open("rb") as memory: - previous_info = pickle.load(memory) + loaded_runs = pickle.load(memory) + + # Update + for run in all_runs: + if run.id not in loaded_runs: + pass + return self._runs From 2ea36c1e5c44c4652bb33cc6787079e94843b4ab Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 18:04:53 +0200 Subject: [PATCH 046/117] Change `compute_loss_per_model` to use new Run dataclass --- autosklearn/ensemble_building/builder.py | 148 ++++++++++------------- 1 
file changed, 67 insertions(+), 81 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 0571f49058..ce62cd2ffb 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -2,13 +2,11 @@ from typing import Any -import glob import logging.handlers import multiprocessing import numbers import os import pickle -import re import shutil import time import traceback @@ -50,7 +48,7 @@ class Run: ens_file: str dir: Path budget: float = 0.0 - loss: float = np.inf + loss: float | None = None _mem_usage: int | None = None # The recorded time of ensemble/test/valid predictions modified recorded_mtimes: dict[str, float] = 0 @@ -73,7 +71,7 @@ def is_dummy(self) -> bool: """Whether this run is a dummy run or not""" return self.num_run == 1 - def was_modified(self, kind: Literal["ensemble", "valid", "test"]) -> bool: + def pred_modified(self, kind: Literal["ensemble", "valid", "test"]) -> bool: """Query for when the ens file was last modified""" if self.recorded_mtimes is None: raise RuntimeError("No times were recorded, use `record_modified_times`") @@ -98,6 +96,40 @@ def record_modified_times(self) -> None: if path.exists(): self.recorded_mtimes[kind] = path.stat().st_mtime() + def predictions( + self, + kind: Literal["ensemble", "valid", "test"], + precisions: type | None = None + ) -> Path: + """Load the predictions for this run + + Parameters + ---------- + kind : Literal["ensemble", "valid", "test"] + The kind of predictions to load + + precisions : type | None = None + What kind of precision reduction to apply + + Returns + ------- + np.ndarray + The loaded predictions + """ + path = self.pred_path(kind) + precision = self.precision + + with path.open("rb") as f: + # TODO: We should probably remove this requirement. 
I'm not sure why model + # predictions are being saved as pickled + predictions = np.load(f, allow_pickle=True) + + dtypes = {16: np.float16, 32: np.float32, 64: np.float64} + dtype = dtypes.get(precision, predictions.dtype) + predictions = predictions.astype(dtype=dtype, copy=False) + + return predictions + @property def id(self) -> RunID: """Get the three components of it's id""" @@ -121,12 +153,11 @@ def from_dir(dir: Path) -> Run: The run object generated from the directory """ name = dir.name - seed, num_run, budget = name.split('_') + seed, num_run, budget = name.split("_") return Run(seed=seed, num_run=num_run, budget=budget, dir=dir) class EnsembleBuilder: - def __init__( self, backend: Backend, @@ -316,26 +347,26 @@ def last_hash(self) -> str: return self._last_hash @property - def runs(self) -> dict[str, Run]: + def runs(self) -> dict[RunID, Run]: """Get the cached information from previous runs""" if self._runs is None: - self._runs = {} - # First read in all the runs on disk runs_dir = Path(self.backend.get_runs_directory()) all_runs = [Run.from_dir(dir) for dir in runs_dir.iterdir()] # Next, get the info about runs from last read, if any - loaded_runs: dict[str, Run] = {} + loaded_runs: dict[RunID, Run] = {} if self.runs_path.exists(): with self.runs_path.open("rb") as memory: loaded_runs = pickle.load(memory) - # Update + # Update any run that was loaded but we didn't have previously for run in all_runs: if run.id not in loaded_runs: - pass + run.record_modified_times() # Record the times it was last modified + loaded_runs[run.id] = run + self._runs = loaded_runs return self._runs @@ -684,93 +715,64 @@ def compute_loss_per_model(self, targets: np.ndarray) -> None: """ self.logger.debug("Read ensemble data set predictions") - # First sort files chronologically - to_read = [] - for pred_path in self.run_ensemble_prediction_paths: - match = self.model_fn_re.search(pred_path) - _seed = int(match.group(1)) - _num_run = int(match.group(2)) - _budget = float(match.group(3)) - mtime = os.path.getmtime(pred_path) - - to_read.append([pred_path, match, _seed, _num_run, _budget, mtime]) + by_last_modified = lambda run: run.recorded_mtimes["ensemble"] + # Now read file, sorted by when their ensemble predicitons were last modified n_read_files = 0 - # Now read file wrt to num_run - for pred_path, match, _seed, _num_run, _budget, mtime in sorted( - to_read, key=lambda x: x[5] - ): + for run in sorted(self.runs.values(), key=by_last_modified): # Break out if we've read more files than we should if self.read_at_most is not None and n_read_files >= self.read_at_most: break - if not pred_path.endswith(".npy"): - self.logger.warning(f"Error loading file (not .npy): {pred_path}") + if not run.pred_path("ensemble").exists(): + self.logger.warning(f"No ensemble predictions for {run}") continue - # Get the run, creating one if it doesn't exist - if pred_path not in self.runs: - run = Run( - seed=_seed, - num_run=_num_run, - budget=_budget, - ens_file=pred_path, - ) - self.runs[pred_path] = run - else: - run = self.runs[pred_path] - # Put an entry in for the predictions if it doesn't exist - if pred_path not in self.run_predictions: - self.run_predictions[pred_path] = { + if run.id not in self.run_predictions: + self.run_predictions[run.id] = { Y_ENSEMBLE: None, Y_VALID: None, Y_TEST: None, } # If the timestamp is the same, nothing's changed so we can move on - if run.mtime_ens == mtime: + if not run.pred_modified("ensemble"): continue - # actually read the predictions and compute their respective 
loss + # Actually read the predictions and compute their respective loss try: - y_ensemble = self._predictions_from(pred_path) + ensemble_predictions = run.predictions("ensemble") loss = calculate_loss( solution=targets, - prediction=y_ensemble, + prediction=ensemble_predictions, task_type=self.task_type, metric=self.metric, scoring_functions=None, ) - - if np.isfinite(run.loss): + except Exception: + self.logger.error( + f"Error ensemble predictions for {run}: {traceback.format_exc()}" + ) + loss = np.inf + finally: + # This is not a case we should reach, when should there be a reason + # that the loss gets updated twice? + if run.loss is not None: self.logger.debug( - f"Changing ensemble loss for file {pred_path} from {run.loss}" - f" to {loss} because file modification time changed?" + f"Changing ensemble loss for {run} to {loss} because file." + f"modification time changed?" f"{run.mtime_ens} -> {run.last_modified()}" ) - run.loss = loss - - # It is not needed to create the object here - # To save memory, we just compute the loss. - run.mtime_ens = os.path.getmtime(pred_path) run.loaded = 2 - run.mem_usage = run.mem_usage() - n_read_files += 1 - except Exception: - self.logger.warning( - f"Err loading {pred_path}: {traceback.format_exc()}" - ) - run.loss = np.inf - - n_files_read = sum([run.loaded > 0 for run in self.runs.values()]) + n_read_total = sum([run.loaded > 0 for run in self.runs.values()]) self.logger.debug( f"Done reading {n_read_files} new prediction files." - f"Loaded {n_files_read} predictions in total." + f"Loaded {n_read_total} predictions in total." ) def get_n_best_preds(self) -> list[str]: @@ -1274,19 +1276,3 @@ def _delete_excess_models(self, selected_keys: list[str]) -> None: f" to error {e}", ) - def _predictions_from(self, path: str | Path) -> np.ndarray: - if isinstance(path, str): - path = Path(path) - - precision = self.precision - - with path.open("rb") as f: - # TODO: We should probably remove this requirement. 
I'm not sure why model - # predictions are being saved as pickled - predictions = np.load(f, allow_pickle=True) - - dtypes = {16: np.float16, 32: np.float32, 64: np.float64} - dtype = dtypes.get(precision, predictions.dtype) - predictions = predictions.astype(dtype=dtype, copy=False) - - return predictions From 3dacc39dd70a03f2cb7e92f5bcc10e4044b83df4 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 1 Apr 2022 19:17:02 +0200 Subject: [PATCH 047/117] Factor out run loss into main loop --- autosklearn/ensemble_building/builder.py | 216 ++++++++++++++--------- 1 file changed, 128 insertions(+), 88 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index ce62cd2ffb..e34acedb1d 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -11,7 +11,7 @@ import time import traceback import zlib -from dataclasses import dataclass +from dataclasses import dataclass, field from pathlib import Path import numpy as np @@ -99,7 +99,7 @@ def record_modified_times(self) -> None: def predictions( self, kind: Literal["ensemble", "valid", "test"], - precisions: type | None = None + precisions: type | None = None, ) -> Path: """Load the predictions for this run @@ -154,7 +154,9 @@ def from_dir(dir: Path) -> Run: """ name = dir.name seed, num_run, budget = name.split("_") - return Run(seed=seed, num_run=num_run, budget=budget, dir=dir) + run = Run(seed=seed, num_run=num_run, budget=budget, dir=dir) + run.record_modified_times() + return run class EnsembleBuilder: @@ -171,7 +173,7 @@ def __init__( seed: int = 1, precision: int = 32, memory_limit: int | None = 1024, - read_at_most: int = 5, + read_at_most: int | None = 5, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, random_state: int | np.random.RandomState | None = None, ): @@ -234,7 +236,7 @@ def __init__( memory_limit: int | None = 1024 memory limit in mb. If ``None``, no memory limit is enforced. 
- read_at_most: int = 5 + read_at_most: int | None = 5 read at most n new prediction files in each iteration logger_port: int = DEFAULT_TCP_LOGGING_PORT @@ -252,6 +254,9 @@ def __init__( if max_models_on_disc is not None and max_models_on_disc < 0: raise ValueError("max_models_on_disc must be positive or None") + if read_at_most is not None and read_at_most < 1: + raise ValueError("Read at most must be greater than 1") + # Setup the logger self.logger = get_named_client_logger(name="EnsembleBuilder", port=logger_port) self.logger_port = logger_port @@ -289,8 +294,8 @@ def __init__( # Data we may need datamanager = self.backend.load_datamanager() - self.y_valid: np.ndarray | None = datamanager.data.get("Y_valid", None) - self.y_test: np.ndarray | None = datamanager.data.get("Y_test", None) + self._y_valid: np.ndarray | None = datamanager.data.get("Y_valid", None) + self._y_test: np.ndarray | None = datamanager.data.get("Y_test", None) self._y_ensemble: np.ndarray | None = None # Cached items, loaded by properties @@ -347,14 +352,14 @@ def last_hash(self) -> str: return self._last_hash @property - def runs(self) -> dict[RunID, Run]: + def runs(self) -> list[Run]: """Get the cached information from previous runs""" if self._runs is None: # First read in all the runs on disk runs_dir = Path(self.backend.get_runs_directory()) all_runs = [Run.from_dir(dir) for dir in runs_dir.iterdir()] - # Next, get the info about runs from last read, if any + # Next, get the info about runs from last EnsembleBulder run, if any loaded_runs: dict[RunID, Run] = {} if self.runs_path.exists(): with self.runs_path.open("rb") as memory: @@ -363,15 +368,13 @@ def runs(self) -> dict[RunID, Run]: # Update any run that was loaded but we didn't have previously for run in all_runs: if run.id not in loaded_runs: - run.record_modified_times() # Record the times it was last modified loaded_runs[run.id] = run self._runs = loaded_runs - return self._runs + return list(self._runs.values()) - @property - def y_ensemble(self) -> np.ndarray | None: + def targets(self, kind: Literal["ensemble", "valid", "test"]) -> np.ndarray | None: """The ensemble targets used for training the ensemble It will attempt to load and cache them in memory but @@ -382,11 +385,19 @@ def y_ensemble(self) -> np.ndarray | None: np.ndarray | None The ensemble targets, if they can be loaded """ - if self._y_ensemble is None: + if kind == "ensemble" and self._y_ensemble is None: if os.path.exists(self.backend._get_targets_ensemble_filename()): self._y_ensemble = self.backend.load_targets_ensemble() + return self._y_ensemble + + elif kind == "valid": + return self._y_valid - return self._y_ensemble + elif kind == "test": + return self._y_test + + else: + raise NotImplementedError() def run( self, @@ -548,9 +559,8 @@ def main( ------- (ensemble_history, nbest, train_preds, valid_preds, test_preds) """ - # Pynisher jobs inside dask 'forget' - # the logger configuration. So we have to set it up - # accordingly + # Pynisher jobs inside dask 'forget' the logger configuration. 
+ # So we have to set it up accordingly self.logger = get_named_client_logger( name="EnsembleBuilder", port=self.logger_port, @@ -563,17 +573,69 @@ def main( left_for_iter = time_left - used_time self.logger.debug(f"Starting iteration {iteration}, time left: {left_for_iter}") - # No predictions found, exit early - if len(self.run_ensemble_prediction_paths) == 0: - self.logger.debug("Found no predictions on ensemble data set") - return self.ensemble_history, self.ensemble_nbest, None, None, None - # Can't load data, exit early if not os.path.exists(self.backend._get_targets_ensemble_filename()): self.logger.debug(f"No targets for ensemble: {traceback.format_exc()}") return self.ensemble_history, self.ensemble_nbest, None, None, None - self.compute_loss_per_model(targets=self.y_ensemble) + # Get our runs + runs = self.runs + + # No runs found, exit early + if len(self.runs) == 0: + self.logger.debug("Found no predictions on ensemble data set") + return self.ensemble_history, self.ensemble_nbest, None, None, None + + # We filter out all runs that don't have any predictions for the ensemble + has_predictions = [] + for run in runs: + if run.pred_path("ensemble").exists(): + has_predictions.append(run) + else: + self.logger.warning(f"No ensemble predictions for {run}") + + runs = has_predictions + + # Calculating losses + # + # We need to calculate the loss of runs for which we have not done so yet. + # To do so, we first find out which runs have a loss and have not had their + # predictions modified, filtering them out. + # We then compute the losses for the runs remaining, sorted by their + # last-modified time, such that oldest are computed first. We only compute + # a certain amount of these to ensure that we don't spend to much time + # reading and computing losses. + # + # Filter runs that need their losses computed + runs_to_compute_loss = [] + for run in runs: + if run.loss is None or run.loss == np.inf: + runs_to_compute_loss.append(run) + + elif run.loss is not None and run.pred_modified("ensemble"): + self.logger.debug(f"{run.id} had its predictions modified?") + runs_to_compute_loss.append(run) + + # Sort by last modified + by_last_modified = lambda r: r.record_mtimes["ensemble"] + runs_to_compute_loss = sorted(runs_to_compute_loss, key=by_last_modified) + + # Limit them if needed + if self.read_at_most is not None: + runs_to_compute_loss = runs_to_compute_loss[: self.read_at_most] + + # Calculate their losses + ensemble_targets = self.targets("ensemble") + for run in runs_to_compute_loss: + loss = self.run_loss(run, targets=ensemble_targets, kind="ensemble") + run.loaded = 2 + run.loss = loss + + n_read_total = sum(run.loaded > 0 for run in runs) + self.logger.debug( + f"Done reading {len(runs_to_compute_loss)} new prediction files." + f"Loaded {n_read_total} predictions in total." 
+ ) # Only the models with the n_best predictions are candidates # to be in the ensemble @@ -698,82 +760,61 @@ def main( else: return self.ensemble_history, self.ensemble_nbest, None, None, None - def compute_loss_per_model(self, targets: np.ndarray) -> None: - """Compute the loss of the predictions on ensemble building data set; - populates self.run_predictions and self.runs + def run_loss( + self, + run: Run, + targets: np.ndarray, + kind: Literal["ensemble", "val", "test"] = "ensemble", + ) -> None: + """Compute the loss of a run on a given set of targets - Side-effects - ------------ - * Populates - - `self.runs` with the new losses it calculated + NOTE + ---- + Still has a side effect of populating self.read_preds Parameters ---------- + run: Run + The run to calculate the loss of + targets: np.ndarray The targets for which to calculate the losses on. Typically the ensemble_targts. - """ - self.logger.debug("Read ensemble data set predictions") - - by_last_modified = lambda run: run.recorded_mtimes["ensemble"] - - # Now read file, sorted by when their ensemble predicitons were last modified - n_read_files = 0 - for run in sorted(self.runs.values(), key=by_last_modified): - # Break out if we've read more files than we should - if self.read_at_most is not None and n_read_files >= self.read_at_most: - break + targets: np.ndarray + The targets to compare against - if not run.pred_path("ensemble").exists(): - self.logger.warning(f"No ensemble predictions for {run}") - continue + kind: "ensemble" | "val" | "test" = "ensemble" + What kind of predicitons to laod from the Runs - # Put an entry in for the predictions if it doesn't exist - if run.id not in self.run_predictions: - self.run_predictions[run.id] = { - Y_ENSEMBLE: None, - Y_VALID: None, - Y_TEST: None, - } + """ + # Put an entry in for the predictions if it doesn't exist + if run.id not in self.run_predictions: + self.run_predictions[run.id] = { + Y_ENSEMBLE: None, + Y_VALID: None, + Y_TEST: None, + } - # If the timestamp is the same, nothing's changed so we can move on - if not run.pred_modified("ensemble"): - continue + try: + run_predictions = run.predictions("ensemble") + loss = calculate_loss( + solution=targets, + prediction=run_predictions, + task_type=self.task_type, + metric=self.metric, + scoring_functions=None, + ) - # Actually read the predictions and compute their respective loss - try: - ensemble_predictions = run.predictions("ensemble") - loss = calculate_loss( - solution=targets, - prediction=ensemble_predictions, - task_type=self.task_type, - metric=self.metric, - scoring_functions=None, - ) - except Exception: - self.logger.error( - f"Error ensemble predictions for {run}: {traceback.format_exc()}" - ) - loss = np.inf - finally: - # This is not a case we should reach, when should there be a reason - # that the loss gets updated twice? - if run.loss is not None: - self.logger.debug( - f"Changing ensemble loss for {run} to {loss} because file." - f"modification time changed?" - f"{run.mtime_ens} -> {run.last_modified()}" - ) - run.loss = loss - run.loaded = 2 - n_read_files += 1 + except Exception: + self.logger.error( + f"Error {kind} predictions for {run}:" f" {traceback.format_exc()}" + ) + loss = np.inf - n_read_total = sum([run.loaded > 0 for run in self.runs.values()]) - self.logger.debug( - f"Done reading {n_read_files} new prediction files." - f"Loaded {n_read_total} predictions in total." 
- ) + finally: + run.loss = loss + run.loaded = 2 def get_n_best_preds(self) -> list[str]: """Get best n predictions according to the loss on the "ensemble set" @@ -1275,4 +1316,3 @@ def _delete_excess_models(self, selected_keys: list[str]) -> None: f"Failed to delete files of non-candidate model {pred_path} due" f" to error {e}", ) - From 2b5e47906bcea4f8a1a376f894ca51afe5fa7679 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sat, 2 Apr 2022 00:21:26 +0200 Subject: [PATCH 048/117] Simplyify get_nbest and compute_losses --- autosklearn/ensemble_building/builder.py | 312 +++++++++++------------ autosklearn/util/functional.py | 80 +++++- 2 files changed, 225 insertions(+), 167 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index e34acedb1d..9322a5050a 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any +from typing import Any, Sequence import logging.handlers import multiprocessing @@ -12,6 +12,7 @@ import traceback import zlib from dataclasses import dataclass, field +from itertools import accumulate from pathlib import Path import numpy as np @@ -27,7 +28,7 @@ from autosklearn.ensembles.ensemble_selection import EnsembleSelection from autosklearn.metrics import Scorer, calculate_loss, calculate_score from autosklearn.util.disk import sizeof -from autosklearn.util.functional import intersection +from autosklearn.util.functional import bound, findwhere, intersection, itersplit from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules @@ -99,7 +100,7 @@ def record_modified_times(self) -> None: def predictions( self, kind: Literal["ensemble", "valid", "test"], - precisions: type | None = None, + precision: type | None = None, ) -> Path: """Load the predictions for this run @@ -117,7 +118,6 @@ def predictions( The loaded predictions """ path = self.pred_path(kind) - precision = self.precision with path.open("rb") as f: # TODO: We should probably remove this requirement. 
I'm not sure why model @@ -280,9 +280,6 @@ def __init__( self.max_models_on_disc = max_models_on_disc self.performance_range_threshold = performance_range_threshold - # max_resident_models keeps the maximum number of models in disc - self.max_resident_models: int | None = None - # The starting time of the procedure self.start_time = 0 @@ -298,6 +295,10 @@ def __init__( self._y_test: np.ndarray | None = datamanager.data.get("Y_test", None) self._y_ensemble: np.ndarray | None = None + # max_resident_models keeps the maximum number of models in disc + # Calculated during `main` + self.max_resident_models: int | None = None + # Cached items, loaded by properties # Check the corresponing properties for descriptions self._run_prediction_paths: list[str] | None = None @@ -587,24 +588,25 @@ def main( return self.ensemble_history, self.ensemble_nbest, None, None, None # We filter out all runs that don't have any predictions for the ensemble - has_predictions = [] - for run in runs: - if run.pred_path("ensemble").exists(): - has_predictions.append(run) - else: - self.logger.warning(f"No ensemble predictions for {run}") + with_predictions, without_predictions = itersplit( + runs, func=lambda r: r.pred_path("ensemble").exists() + ) + + if len(without_predictions) > 0: + self.logger.warn(f"Have no ensemble predictions for {without_predictions}") - runs = has_predictions + runs = with_predictions # Calculating losses # # We need to calculate the loss of runs for which we have not done so yet. - # To do so, we first find out which runs have a loss and have not had their - # predictions modified, filtering them out. + # To do so, we first filter out runs that already have a loss + # and have not had their predictions modified. + # # We then compute the losses for the runs remaining, sorted by their # last-modified time, such that oldest are computed first. We only compute - # a certain amount of these to ensure that we don't spend to much time - # reading and computing losses. + # `self.read_at_most` of them, if specified, to ensure we don't spend too much + # time reading and computing losses. 
# # Filter runs that need their losses computed runs_to_compute_loss = [] @@ -639,7 +641,7 @@ def main( # Only the models with the n_best predictions are candidates # to be in the ensemble - candidate_models = self.get_n_best_preds() + candidate_models = self.get_nbest() if not candidate_models: # no candidates yet if return_predictions: return ( @@ -797,7 +799,7 @@ def run_loss( } try: - run_predictions = run.predictions("ensemble") + run_predictions = run.predictions("ensemble", precision=self.precision) loss = calculate_loss( solution=targets, prediction=run_predictions, @@ -816,7 +818,11 @@ def run_loss( run.loss = loss run.loaded = 2 - def get_n_best_preds(self) -> list[str]: + def get_nbest( + self, + runs: Sequence[Run], + nbest: int | None = None, + ) -> list[Run]: """Get best n predictions according to the loss on the "ensemble set" Side effects: @@ -830,180 +836,156 @@ def get_n_best_preds(self) -> list[str]: Returns the paths of the selected models which are used as keys in `run_predictions` and `runs` """ - # Sort by loss as priority 1 and then by num_run on a ascending order - # We want small id first - keys = [(path, run.loss, run.num_run) for path, run in self.runs.items()] - sorted_keys = sorted(keys, key=lambda x: (x[1], x[2])) - - # number of models available - num_keys = len(sorted_keys) - # remove all that are at most as good as random - # note: dummy model must have run_id=1 (there is no run_id=0) - dummy_losses = list(filter(lambda x: x[2] == 1, sorted_keys)) - - # number of dummy models - num_dummy = len(dummy_losses) - dummy_loss = dummy_losses[0] - self.logger.debug("Use %f as dummy loss" % dummy_loss[1]) - - # sorted_keys looks like: (k, v["ens_loss"], v["num_run"]) - # On position 1 we have the loss of a minimization problem. - # keep only the predictions with a loss smaller than the dummy - # prediction - sorted_keys = filter(lambda x: x[1] < dummy_loss[1], sorted_keys) - - # remove Dummy Classifier - sorted_keys = list(filter(lambda x: x[2] > 1, sorted_keys)) - if not sorted_keys: - # no model left; try to use dummy loss (num_run==0) - # log warning when there are other models but not better than dummy model - if num_keys > num_dummy: + if nbest is None: + nbest = self.ensemble_nbest + + # Getting the candidates + # + # First we must split out dummy runs and real runs. We sort the dummy + # runs to then remove any real ones that are worse than the best dummy. + # If this removes all viable candidates, then we reinclude dummy runs + # as being viable candidates. + # + dummies, real = itersplit(runs, func=lambda r: r.is_dummy()) + + if len(dummies) == 0: + raise ValueError("We always expect a dummy run, i.e. a run with num_run=1") + + dummy_loss = sorted(dummies)[0].loss + self.logger.debug(f"Using {dummy_loss} to filter candidates") + + candidates = [r for r in real if r.loss < dummy_loss] + + # If there are no candidates left, use the dummies + if len(candidates) == 0: + if len(real) > len(dummies): self.logger.warning( "No models better than random - using Dummy loss!" - "Number of models besides current dummy model: %d. " - "Number of dummy models: %d", - num_keys - 1, - num_dummy, + f"\n\tNumber of models besides current dummy model: {len(real)}" + f"\n\tNumber of dummy models: {len(dummies)}", ) - sorted_keys = [ - (path, run.loss, run.num_run) - for path, run in self.runs.items() - if run.seed == self.seed and run.is_dummy() - ] - # reload predictions if losses changed over time and a model is - # considered to be in the top models again! 
- if not isinstance(self.ensemble_nbest, numbers.Integral): - # Transform to number of models to keep. Keep at least one - keep_nbest = max( - 1, min(len(sorted_keys), int(len(sorted_keys) * self.ensemble_nbest)) - ) - self.logger.debug( - "Library pruning: using only top %f percent of the models for ensemble " - "(%d out of %d)", - self.ensemble_nbest * 100, - keep_nbest, - len(sorted_keys), - ) + + candidates = [d for d in dummies if d.seed == self.seed] + + # Sort the candidates by lowest loss first and then lowest numrun going forward + candidates = sorted(candidates, key=lambda r: (r.loss, r.num_run)) + + # Calculate `keep_nbest` to determine how many models to keep + # + # 1. First we use the parameter `ensemble_nbest` to determine a base + # size of how many to keep, `int` being absolute and float being + # percentage of the available candidates. + # + # 2. If `max_models_on_disc` was an int, we can take this to be absolute. + # Otherwise, we take it to be a memory *cutoff*. We also add some buffer + # to the *cutoff*, essentially giving us that the *cutoff* is + # + # cutoff = max_models_on_disc - size_of_largest_model + # + # We use the fact models are sorted based on loss, from best to worst, + # and we calculate the cumulative memory cost. From this, we determine + # how many of the best models we can keep before we go over this *cutoff*. + # This is called the `max_resident_models`. + # + # 3. Finally, we take the smaller of the two from step 1. and 2. to determine + # the amount of models to keep + # + # Use `ensemble_n_best` + n_candidates = len(candidates) + if isinstance(self.ensemble_nbest, int): + keep_nbest = min(self.ensemble_nbest, n_candidates) else: - # Keep only at most ensemble_nbest - keep_nbest = min(self.ensemble_nbest, len(sorted_keys)) - self.logger.debug(f"Using {keep_nbest} of total {len(sorted_keys)} models") - - # If max_models_on_disc is None, do nothing - # One can only read at most max_models_on_disc models - if self.max_models_on_disc is not None: - if not isinstance(self.max_models_on_disc, numbers.Integral): - consumption = [ - ( - run.loss, - run.mem_usage, - ) - for run in self.runs.values() - if run.mem_usage is not None - ] - max_consumption = max(c[1] for c in consumption) - - # We are pessimistic with the consumption limit indicated by - # max_models_on_disc by 1 model. Such model is assumed to spend - # max_consumption megabytes - if ( - sum(c[1] for c in consumption) + max_consumption - ) > self.max_models_on_disc: - - # just leave the best -- smaller is better! 
- # This list is in descending order, to preserve the best models - sorted_cum_consumption = ( - np.cumsum([c[1] for c in list(sorted(consumption))]) - + max_consumption - ) - max_models = np.argmax( - sorted_cum_consumption > self.max_models_on_disc - ) + val = n_candidates * self.ensemble_nbest + keep_nbest = int(bound(val, low=1, high=n_candidates)) - # Make sure that at least 1 model survives - self.max_resident_models = max(1, max_models) - self.logger.warning( - "Limiting num of models via float max_models_on_disc={}" - " as accumulated={} worst={} num_models={}".format( - self.max_models_on_disc, - (sum(c[1] for c in consumption) + max_consumption), - max_consumption, - self.max_resident_models, - ) - ) - else: - self.max_resident_models = None + percent = keep_nbest / n_candidates + self.logger.debug(f"Using top {keep_nbest} of {n_candidates} ({percent:.2%})") + + # Determine `max_resident_models` + self.max_resident_models = self.max_models_on_disc + if isinstance(self.max_resident_models, float): + largest_mem = max(candidates, key=lambda r: r.mem_usage) + cutoff = self.max_models_on_disc - largest_mem + + total = sum(r.mem_usage for r in candidates) + if total <= cutoff: + self.max_resident_models = None else: - self.max_resident_models = self.max_models_on_disc + # Index of how many models before we go over the cutoff + mem_usage_for_n_models = accumulate(r.mem_usage for r in candidates) + max_models = findwhere( + mem_usage_for_n_models, + lambda cost: cost > cutoff, + default=len(candidates), + ) + + # Ensure we always at least have 1, even if the very first + # model would have put us over the cutoff + self.max_resident_models = max(1, max_models) + + self.logger.warning( + f"Limiting num of models via `max_models_on_disc` float" + f" max_models_on_disc={self.max_models_on_disc}" + f" cutoff={cutoff}" + f" worst={largest_mem}" + f" num_models={self.max_resident_models}" + ) if ( self.max_resident_models is not None - and keep_nbest > self.max_resident_models + and self.max_resident_models < keep_nbest ): self.logger.debug( - "Restricting the number of models to %d instead of %d due to argument " - "max_models_on_disc", - self.max_resident_models, - keep_nbest, + f"Restricting the number of models to {self.max_resident_models}" + f"instead of {keep_nbest} due to argument " ) keep_nbest = self.max_resident_models # consider performance_range_threshold + # + # if self.performance_range_threshold > 0: - best_loss = sorted_keys[0][1] - worst_loss = dummy_loss[1] - worst_loss -= (worst_loss - best_loss) * self.performance_range_threshold - if sorted_keys[keep_nbest - 1][1] > worst_loss: - # We can further reduce number of models - # since worst model is worse than thresh - for i in range(0, keep_nbest): - # Look at most at keep_nbest models, - # but always keep at least one model - current_loss = sorted_keys[i][1] - if current_loss >= worst_loss: - self.logger.debug( - "Dynamic Performance range: " - "Further reduce from %d to %d models", - keep_nbest, - max(1, i), - ) - keep_nbest = max(1, i) - break - ensemble_n_best = keep_nbest + best = runs[0].loss + cutoff = dummy_loss - (dummy_loss - best) * self.performance_range_threshold + + considered = candidates[:keep_nbest] + if considered[-1].loss > cutoff: + # Find the first run that is worse than the cutoff + cutoff_run_idx = findwhere( + considered, + lambda r: r.loss >= cutoff, + default=len(considered), + ) - # reduce to keys - sorted_keys = list(map(lambda x: x[0], sorted_keys)) + # Make sure we always keep at least 1 + 
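+                # cutoff_run_idx is the first index whose loss reaches the cutoff, so everything kept is strictly better than it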
keep_nbest = max(1, cutoff_run_idx) - # remove loaded predictions for non-winning models - for k in sorted_keys[ensemble_n_best:]: + keep, unload = candidates[:keep_nbest], candidates[keep_nbest:] - if k in self.run_predictions: - self.run_predictions[k][Y_ENSEMBLE] = None - self.run_predictions[k][Y_VALID] = None - self.run_predictions[k][Y_TEST] = None + # remove loaded predictions for non-winning models + for run in unload: + if run.id in self.run_predictions: + self.run_predictions[run.id][Y_ENSEMBLE] = None + self.run_predictions[run.id][Y_VALID] = None + self.run_predictions[run.id][Y_TEST] = None - run = self.runs[k] if run.loaded == 1: - self.logger.debug( - f"Dropping model {k} {run.seed}, {run.num_run} with loss {run.loss}" - ) + self.logger.debug(f"Dropping model {run}") run.loaded = 2 # Load the predictions for the winning - for k in sorted_keys[:ensemble_n_best]: - - run = self.runs[k] + for run in keep: if run.loaded != 3 and ( - k not in self.run_predictions - or self.run_predictions[k][Y_ENSEMBLE] is None + run.id not in self.run_predictions + or self.run_predictions[run.id][Y_ENSEMBLE] is None ): # No need to load valid and test here because they are loaded only if # the model ends up in the ensemble - self.run_predictions[k][Y_ENSEMBLE] = self._predictions_from(k) + predictions = run.predictions("ensemble", precision=self.precision) + self.run_predictions[run.id][Y_ENSEMBLE] = predictions run.loaded = 1 - # return keys of self.runs with lowest losses - return sorted_keys[:ensemble_n_best] + return keep def get_valid_test_preds( self, diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index 15d3577f28..4f21090d58 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Iterable, Optional, TypeVar +from typing import Callable, Iterable, Iterator, TypeVar from functools import reduce @@ -9,7 +9,7 @@ T = TypeVar("T") -def normalize(x: np.ndarray, axis: Optional[int] = None) -> np.ndarray: +def normalize(x: np.ndarray, axis: int | None = None) -> np.ndarray: """Normalizes an array along an axis Note @@ -84,3 +84,79 @@ def intersection(*items: Iterable[T]) -> set[T]: return set() return set(reduce(lambda s1, s2: set(s1) & set(s2), items, items[0])) + + +def itersplit(lst: Iterable[T], func: Callable[[T], bool]) -> tuple[list[T], list[T]]: + """Split a list in two based on a predicate + + Parameters + ---------- + lst : Iterable[T] + The list to split + + func : Callable[[T], bool] + The predicate to split it on + + Returns + ------- + (a: list[T], b: list[T]) + Everything in a satisfies the func while nothing in b does + """ + a = [] + b = [] + for x in lst: + if func(x): + a.append(x) + else: + b.append(x) + + return a, b + + +def bound(val: float, *, low: float, high: float) -> float: + """Bounds a value between a low and high + + .. code:: python + + x = bound(14, low=0, high=13.1) + # x == 13.1 + + Parameters + ---------- + val : float + The value to bound + + low : float + The low to bound against + + high : float + The high to bound against + + Returns + ------- + float + The bounded value + """ + return max(low, min(val, high)) + + +def findwhere(itr: Iterable[T], func: Callable[[T], bool], *, default: int = -1) -> int: + """Find the index of the next occurence where func is True. 
+ + Parameters + ---------- + itr : Iterable[T] + The iterable to search over + + func : Callable[[T], bool] + The function to use + + default : int = -1 + The default value to give if no value was found where func was True + + Returns + ------- + int + The index where func was True + """ + return next((i for i, t in enumerate(itr) if func(t)), default) From 4e1222be077e0c1ac5e0da471f32598683868487 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 3 Apr 2022 15:08:22 +0200 Subject: [PATCH 049/117] Major rewrite of ensemble builder main loop --- autosklearn/ensemble_building/builder.py | 426 +++++------------------ 1 file changed, 86 insertions(+), 340 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 9322a5050a..a509e2bf3b 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -11,7 +11,7 @@ import time import traceback import zlib -from dataclasses import dataclass, field +from dataclasses import dataclass from itertools import accumulate from pathlib import Path @@ -20,9 +20,6 @@ import pynisher from typing_extensions import Literal -from autosklearn.automl_common.common.ensemble_building.abstract_ensemble import ( # noqa: E501 - AbstractEnsemble, -) from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.ensembles.ensemble_selection import EnsembleSelection @@ -138,6 +135,9 @@ def id(self) -> RunID: def __str__(self) -> str: return f"{self.seed}_{self.num_run}_{self.budget}" + def __hash__(self) -> int: + return hash(self.id) + @staticmethod def from_dir(dir: Path) -> Run: """Creates a Run from a path point to the directory of a run @@ -641,75 +641,65 @@ def main( # Only the models with the n_best predictions are candidates # to be in the ensemble - candidate_models = self.get_nbest() - if not candidate_models: # no candidates yet - if return_predictions: - return ( - self.ensemble_history, - self.ensemble_nbest, - train_pred, - valid_pred, - test_pred, - ) - else: - return self.ensemble_history, self.ensemble_nbest, None, None, None - - # populates predictions in self.run_predictions - # reduces selected models if file reading failed - n_sel_valid, n_sel_test = self.get_valid_test_preds( - selected_keys=candidate_models - ) + candidates = self.get_nbest() + if len(candidates) == 0: + return self.ensemble_history, self.ensemble_nbest, None, None, None # Get a set representation of them as we will begin doing intersections - candidates_set = set(candidate_models) - valid_set = set(n_sel_valid) - test_set = set(n_sel_test) - - # Both n_sel_* have entries, but there is no overlap, this is critical - if len(test_set) > 0 and len(valid_set) > 0 and len(valid_set & test_set) == 0: - self.logger.error("n_sel_valid and n_sel_test not empty but do not overlap") - if return_predictions: - return ( - self.ensemble_history, - self.ensemble_nbest, - train_pred, - valid_pred, - test_pred, - ) - else: - return self.ensemble_history, self.ensemble_nbest, None, None, None + # Not here that valid_set and test_set are both subsets of candidates_set + candidates_set = set(candidates) + valid_set = {r for r in candidates if r.pred_path("valid").exists()} + test_set = {r for r in candidates if r.pred_path("test").exists()} + + if len(valid_set & test_set) == 0 and len(test_set) > 0 and len(valid_set) > 0: + self.logger.error("valid_set and test_set not empty but do not overlap") + return 
self.ensemble_history, self.ensemble_nbest, None, None, None + # Find the intersect between the most groups and use that to fit the ensemble intersect = intersection(candidates_set, valid_set, test_set) if len(intersect) > 0: - candidate_models = sorted(list(intersect)) - n_sel_test = candidate_models - n_sel_valid = candidate_models + candidate_models, valid_models, test_models = sorted(list(intersect)) elif len(candidates_set & valid_set) > 0: - candidate_models = sorted(list(candidates_set & valid_set)) - n_sel_valid = candidate_models + candidate_models, valid_models = sorted(list(candidates_set & valid_set)) + test_models = [] elif len(candidates_set & test_set) > 0: - candidate_models = sorted(list(candidates_set & test_set)) - n_sel_test = candidate_models + candidate_models, test_models = sorted(list(candidates_set & test_set)) + valid_models = [] # This has to be the case else: - n_sel_test = [] - n_sel_valid = [] + test_models = [] + valid_models = [] # train ensemble ensemble = self.fit_ensemble(selected_keys=candidate_models) # Save the ensemble for later use in the main auto-sklearn module! if ensemble is not None: + self.logger.info(ensemble) + + ens_perf = ensemble.get_validation_performance() + self.validation_performance_ = min(self.validation_performance_, ens_perf) self.backend.save_ensemble(ensemble, iteration, self.seed) # Delete files of non-candidate models - can only be done after fitting the # ensemble and saving it to disc so we do not accidentally delete models in # the previous ensemble if self.max_resident_models is not None: - self._delete_excess_models(selected_keys=candidate_models) + to_delete = set(runs) - set(candidate_models) + to_delete = {r for r in to_delete if not r.is_dummy()} + for run in to_delete: + try: + shutil.rmtree(run.dir) + self.logger.info(f"Deleted files for {run}") + except Exception as e: + self.logger.error(f"Failed to delete files for {run}: \n{e}") + finally: + run.mem_usage = None + run.loaded = 3 + run.loss = np.inf # Save the read losses status for the next iteration, we should do this # before doing predictions as this is a likely place of memory issues @@ -717,33 +707,42 @@ def main( pickle.dump(self.runs, f) if ensemble is not None: - train_pred = self.predict( - set_="train", - ensemble=ensemble, - selected_keys=candidate_models, - n_preds=len(candidate_models), - index_run=iteration, - ) - # We can't use candidate_models here, as n_sel_* might be empty - valid_pred = self.predict( - set_="valid", - ensemble=ensemble, - selected_keys=n_sel_valid, - n_preds=len(candidate_models), - index_run=iteration, - ) - # TODO if predictions fails, build the model again during the - # next iteration! 
- test_pred = self.predict( - set_="test", - ensemble=ensemble, - selected_keys=n_sel_test, - n_preds=len(candidate_models), - index_run=iteration, - ) - - # Add a score to run history to see ensemble progress - self._add_ensemble_trajectory(train_pred, valid_pred, test_pred) + performance_stamp = {"Timestamp": pd.Timestamp.now()} + + for kind, score_name, models in [ + ("ensemble", "optimization", candidate_models), + ("valid", "val", valid_models), + ("test", "test", test_models), + ]: + if len(candidate_models) != len(models): + self.logger.info( + "Found inconsistent number of predictions and models" + f" ({len(candidate_models)} vs {len(models)}) for subset {kind}" + ) + else: + run_preds = [ + r.predictions(kind, precision=self.precision) for r in models + ] + pred = ensemble.predict(run_preds) + + # Pretty sure this whole step is uneeded but left over and afraid + # to touch + if self.task_type == BINARY_CLASSIFICATION: + pred = pred[:, 1] + + if pred.ndim == 1 or pred.shape[1] == 1: + pred = np.vstack( + ((1 - pred).reshape((1, -1)), pred.reshape((1, -1))) + ).transpose() + + score = calculate_score( + solution=self.targets(kind), + prediction=pred, + task_type=self.task_type, + metric=self.metric, + scoring_functions=None + ) + performance_stamp[f"ensemble_{score_name}_score"] = score # The loaded predictions and hash can only be saved after the ensemble has been # built, because the hash is computed during the construction of the ensemble @@ -987,73 +986,7 @@ def get_nbest( return keep - def get_valid_test_preds( - self, - selected_keys: list[str], - ) -> tuple[list[str], list[str]]: - """Get valid and test predictions from disc and store in self.run_predictions - - Parameters - ---------- - selected_keys: list - list of selected keys of self.run_predictions - - Return - ------ - keys_valid: list[str], keys_test: list[str] - All keys in selected keys for which we could read the valid and test - predictions. 
- """ - success_keys_valid = [] - success_keys_test = [] - - for k in selected_keys: - run = self.runs[k] - - rundir = Path(self.backend.get_numrun_directory(*run.id)) - - valid_fn = rundir / f"predictions_valid_{run}.npy" - test_fn = rundir / f"predictions_test_{run}.npy" - - if valid_fn.exists(): - if ( - run.mtime_valid == valid_fn.stat().st_mtime - and k in self.run_predictions - and self.run_predictions[k][Y_VALID] is not None - ): - success_keys_valid.append(k) - continue - - else: - try: - y_valid = self._predictions_from(valid_fn) - self.run_predictions[k][Y_VALID] = y_valid - success_keys_valid.append(k) - run.mtime_valid = valid_fn.stat().st_mtime - - except Exception: - self.logger.warning(f"Err {valid_fn}:{traceback.format_exc()}") - - if test_fn.exists(): - if ( - run.mtime_test == test_fn.stat().st_mtime - and k in self.run_predictions - and self.run_predictions[k][Y_TEST] is not None - ): - success_keys_test.append(k) - - else: - try: - y_test = self._predictions_from(test_fn) - self.run_predictions[k][Y_TEST] = y_test - success_keys_test.append(k) - run.mtime_test = os.path.getmtime(test_fn) - except Exception: - self.logger.warning(f"Err {test_fn}:{traceback.format_exc()}") - - return success_keys_valid, success_keys_test - - def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: + def fit_ensemble(self, selected_runs: list[Run]) -> EnsembleSelection: """TODO Parameters @@ -1066,9 +999,10 @@ def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: ensemble: EnsembleSelection The trained ensemble """ - predictions_train = [self.run_predictions[k][Y_ENSEMBLE] for k in selected_keys] - - selected_runs = [self.runs[k] for k in selected_keys] + predictions_train = [ + run.predictions("ensemble", precision=self.precision) + for run in selected_runs + ] # List of (seed, num_run, budget) include_num_runs = [run.id for run in selected_runs] @@ -1081,6 +1015,7 @@ def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: for i in range(len(predictions_train)) ] ) + if self.last_hash == current_hash: self.logger.debug( "No new model predictions selected -- skip ensemble building " @@ -1097,204 +1032,15 @@ def fit_ensemble(self, selected_keys: list[str]) -> EnsembleSelection: random_state=self.random_state, ) + self.logger.debug(f"Fitting ensemble on {len(predictions_train)} models") + start_time = time.time() try: - self.logger.debug(f"Fitting ensemble on {len(predictions_train)} models") - - start_time = time.time() - - # TODO y_ensemble can be None here ensemble.fit(predictions_train, self.y_ensemble, include_num_runs) - - duration = time.time() - start_time - - self.logger.debug(f"Fitting the ensemble took {duration} seconds.") - self.logger.info(ensemble) - - ens_perf = ensemble.get_validation_performance() - self.validation_performance_ = min(self.validation_performance_, ens_perf) - except Exception as e: self.logger.error(f"Caught error {e}: {traceback.format_exc()}") - ensemble = None - finally: - # Explicitly free memory - del predictions_train - return ensemble - - def predict( - self, - set_: str, - ensemble: AbstractEnsemble, - selected_keys: list, - n_preds: int, - index_run: int, - ) -> np.ndarray | None: - """Save preditions on ensemble, validation and test data on disc - - Parameters - ---------- - set_: "valid" | "test" | str - The data split name, returns preds for y_ensemble if not "valid" or "test" - - ensemble: EnsembleSelection - The trained Ensemble - - selected_keys: list[str] - List of selected keys of self.runs - - 
n_preds: int - Number of prediction models used for ensemble building same number of - predictions on valid and test are necessary - - index_run: int - n-th time that ensemble predictions are written to disc - - Return - ------ - np.ndarray | None - Returns the predictions if it can, else None - """ - self.logger.debug("Predicting the %s set with the ensemble!", set_) - - if set_ == "valid": - pred_set = Y_VALID - elif set_ == "test": - pred_set = Y_TEST - else: - pred_set = Y_ENSEMBLE - predictions = [self.run_predictions[k][pred_set] for k in selected_keys] - - if n_preds == len(predictions): - y = ensemble.predict(predictions) - if self.task_type == BINARY_CLASSIFICATION: - y = y[:, 1] - return y - else: - self.logger.info( - "Found inconsistent number of predictions and models (%d vs " - "%d) for subset %s", - len(predictions), - n_preds, - set_, - ) return None - def _add_ensemble_trajectory( - self, - train_pred: np.ndarray, - valid_pred: np.ndarray | None, - test_pred: np.ndarray | None, - ) -> None: - """ - Records a snapshot of how the performance look at a given training - time. - - Parameters - ---------- - train_pred: np.ndarray - The training predictions - - valid_pred: np.ndarray | None - The predictions on the validation set using ensemble + duration = time.time() - start_time + self.logger.debug(f"Fitting the ensemble took {duration} seconds.") - test_pred: np.ndarray | None - The predictions on the test set using ensemble - """ - if self.task_type == BINARY_CLASSIFICATION: - if len(train_pred.shape) == 1 or train_pred.shape[1] == 1: - train_pred = np.vstack( - ((1 - train_pred).reshape((1, -1)), train_pred.reshape((1, -1))) - ).transpose() - - if valid_pred is not None and ( - len(valid_pred.shape) == 1 or valid_pred.shape[1] == 1 - ): - valid_pred = np.vstack( - ((1 - valid_pred).reshape((1, -1)), valid_pred.reshape((1, -1))) - ).transpose() - - if test_pred is not None and ( - len(test_pred.shape) == 1 or test_pred.shape[1] == 1 - ): - test_pred = np.vstack( - ((1 - test_pred).reshape((1, -1)), test_pred.reshape((1, -1))) - ).transpose() - - # TODO y_ensemble can be None here - performance_stamp = { - "Timestamp": pd.Timestamp.now(), - "ensemble_optimization_score": calculate_score( - solution=self.y_ensemble, - prediction=train_pred, - task_type=self.task_type, - metric=self.metric, - scoring_functions=None, - ), - } - if valid_pred is not None: - # TODO: valid_pred are a legacy from competition manager - # and this if never happens. Re-evaluate Y_valid support - performance_stamp["ensemble_val_score"] = calculate_score( - solution=self.y_valid, - prediction=valid_pred, - task_type=self.task_type, - metric=self.metric, - scoring_functions=None, - ) - - # In case test_pred was provided - if test_pred is not None: - performance_stamp["ensemble_test_score"] = calculate_score( - solution=self.y_test, - prediction=test_pred, - task_type=self.task_type, - metric=self.metric, - scoring_functions=None, - ) - - self.ensemble_history.append(performance_stamp) - - def _delete_excess_models(self, selected_keys: list[str]) -> None: - """ - Deletes models excess models on disc. self.max_models_on_disc - defines the upper limit on how many models to keep. - Any additional model with a worst loss than the top - self.max_models_on_disc is deleted. 
- - Parameters - ---------- - selected_keys: list[str] - TODO - """ - # Loop through the files currently in the directory - for pred_path in self.run_ensemble_prediction_paths: - - # Do not delete candidates - if pred_path in selected_keys: - continue - - match = self.model_fn_re.search(pred_path) - _seed = int(match.group(1)) - _num_run = int(match.group(2)) - _budget = float(match.group(3)) - - # Do not delete the dummy prediction - if _num_run == 1: - continue - - numrun_dir = self.backend.get_numrun_directory(_seed, _num_run, _budget) - try: - os.rename(numrun_dir, numrun_dir + ".old") - shutil.rmtree(numrun_dir + ".old") - - self.logger.info(f"Deleted files of non-candidate model {pred_path}") - - self.runs[pred_path].disc_space_cost_mb = None - self.runs[pred_path].loaded = 3 - self.runs[pred_path].loss = np.inf - - except Exception as e: - self.logger.error( - f"Failed to delete files of non-candidate model {pred_path} due" - f" to error {e}", - ) + return ensemble From 881ecef26423467d8b25b0b16fbe81a8d3612583 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 3 Apr 2022 15:29:20 +0200 Subject: [PATCH 050/117] Change to simpler hashing --- autosklearn/ensemble_building/builder.py | 33 +++++++++++++----------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index a509e2bf3b..13044e399d 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -12,6 +12,7 @@ import traceback import zlib from dataclasses import dataclass +from functools import reduce from itertools import accumulate from pathlib import Path @@ -616,6 +617,7 @@ def main( elif run.loss is not None and run.pred_modified("ensemble"): self.logger.debug(f"{run.id} had its predictions modified?") + run.record_modified_times() # re-mark modfied times runs_to_compute_loss.append(run) # Sort by last modified @@ -740,7 +742,7 @@ def main( prediction=pred, task_type=self.task_type, metric=self.metric, - scoring_functions=None + scoring_functions=None, ) performance_stamp[f"ensemble_{score_name}_score"] = score @@ -999,22 +1001,14 @@ def fit_ensemble(self, selected_runs: list[Run]) -> EnsembleSelection: ensemble: EnsembleSelection The trained ensemble """ - predictions_train = [ - run.predictions("ensemble", precision=self.precision) - for run in selected_runs - ] - # List of (seed, num_run, budget) include_num_runs = [run.id for run in selected_runs] - # check hash if ensemble training data changed - # TODO could we just use the size, and the last row? 
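The simpler fingerprinting introduced by this commit can be sketched as follows (illustrative only, with made-up run ids and mtimes): each selected run contributes a hash of its id and the recorded modification time of its ensemble predictions, and the per-run hashes are folded together with XOR.

    from functools import reduce

    # (seed, num_run, budget) ids paired with the mtime of their ensemble predictions
    selected = [
        ((0, 2, 0.0), 1_648_000_000.0),
        ((0, 3, 100.0), 1_648_000_123.0),
    ]

    components = [hash((run_id, mtime)) for run_id, mtime in selected]
    fingerprint = reduce(lambda a, b: a ^ b, components)

    # If this matches the fingerprint stored from the previous iteration, the same
    # predictions would be fed to ensemble selection again, so fitting can be skipped.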
- current_hash = "".join( - [ - str(zlib.adler32(predictions_train[i].data.tobytes())) - for i in range(len(predictions_train)) - ] - ) + # Compute hash based on the run ids and when they were last modified + hash_components = [ + hash(r.id, r.record_mtimes["ensemble"]) for r in selected_runs + ] + current_hash = reduce(lambda a, b: a ^ b, hash_components) if self.last_hash == current_hash: self.logger.debug( @@ -1023,6 +1017,11 @@ def fit_ensemble(self, selected_runs: list[Run]) -> EnsembleSelection: ) return None + predictions_train = [ + run.predictions("ensemble", precision=self.precision) + for run in selected_runs + ] + self._last_hash = current_hash ensemble = EnsembleSelection( @@ -1035,7 +1034,11 @@ def fit_ensemble(self, selected_runs: list[Run]) -> EnsembleSelection: self.logger.debug(f"Fitting ensemble on {len(predictions_train)} models") start_time = time.time() try: - ensemble.fit(predictions_train, self.y_ensemble, include_num_runs) + ensemble.fit( + predictions=predictions_train, + labels=self.targets("ensemble"), + identifiers=include_num_runs, + ) except Exception as e: self.logger.error(f"Caught error {e}: {traceback.format_exc()}") return None From ee9fdef09ac794461b08917f65429b1867cbb57a Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 3 Apr 2022 23:07:35 +0200 Subject: [PATCH 051/117] Start value split --- autosklearn/util/functional.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index 4f21090d58..a4cb07f599 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Callable, Iterable, Iterator, TypeVar +from typing import Callable, Iterable, Sequence, TypeVar from functools import reduce @@ -160,3 +160,23 @@ def findwhere(itr: Iterable[T], func: Callable[[T], bool], *, default: int = -1) The index where func was True """ return next((i for i, t in enumerate(itr) if func(t)), default) + + +def value_split( + lst: Sequence[float], + *, + low: float | None = None, + high: float | None = None, + at: float = 0.5, + sort: bool = True, +) -> tuple[list[float], list[float]]: + if sort: + lst = sorted(lst) + + if low is None: + low = lst[0] + + if high is None: + high = lst[-1] + + From ba75c2c0a78d80d7a0137dceed2148eb946c1f76 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 4 Apr 2022 09:19:59 +0200 Subject: [PATCH 052/117] Add `value_split` --- autosklearn/util/functional.py | 67 ++++++++++++++++++++++++++++++---- 1 file changed, 60 insertions(+), 7 deletions(-) diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index a4cb07f599..e855e2de26 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Callable, Iterable, Sequence, TypeVar +from typing import Callable, Iterable, Sequence, TypeVar, no_type_check from functools import reduce @@ -157,26 +157,79 @@ def findwhere(itr: Iterable[T], func: Callable[[T], bool], *, default: int = -1) Returns ------- int - The index where func was True + The first index where func was True """ return next((i for i, t in enumerate(itr) if func(t)), default) +@no_type_check def value_split( - lst: Sequence[float], + lst: Sequence[T], *, + key: Callable[[T], float] | None = None, low: float | None = None, high: float | None = None, at: float = 0.5, sort: bool = True, -) -> tuple[list[float], list[float]]: +) -> 
tuple[list[T], list[T]]: + """Split a list according to it's values. + + ..code:: python + + # low at = 0.75 high + # -----|----------------|---------| + # 0 20 80 100 + + x = np.linspace(0, 100, 21) + # [0, 5, 10, ..., 95, 100] + + lower, higher = value_split(x, at=0.6, low=20) + + print(lower, higher) + # [0, 5, 10, ..., 75] [80, ..., 100] + + Parameters + ---------- + lst : Sequence[T] + The list of items to split + + key : Callable[[T], float] | None = None + An optional key to access the values by + + low : float | None = None + The lowest value to consider, otherwise will use the minimum in lst + + high : float | None = None + The highest value to consider, otherwise will use the maximum in lst + + at : float = 0.5 + At what perecentage to split at + + sort : bool = True + Whether to sort the values, set to False if values are sorted before hand + + Returns + ------- + tuple[list[T], list[T]] + The lower and upper parts of the list based on the split + """ if sort: - lst = sorted(lst) + lst = sorted(lst) if key is None else sorted(lst, key=key) if low is None: - low = lst[0] + low = lst[0] if key is None else key(lst[0]) if high is None: - high = lst[-1] + high = lst[-1] if key is None else key(lst[-1]) + + # Convex combination of two points + pivot_value = (1 - at) * low + (at) * high + + if key is None: + greater_than_pivot = (lambda x: x >= pivot_value) + else: + greater_than_pivot = (lambda x: key(x) >= pivot_value) + pivot_idx = findwhere(lst, greater_than_pivot, default=len(lst)) + return lst[:pivot_idx], lst[pivot_idx:] From be44195c8bdb69e87dc653e6ba4e811090e4b8ac Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 8 Apr 2022 11:36:25 +0200 Subject: [PATCH 053/117] Reworked Builder --- autosklearn/ensemble_building/builder.py | 940 ++++++++++------------- autosklearn/ensemble_building/run.py | 138 ++++ autosklearn/util/functional.py | 114 ++- 3 files changed, 614 insertions(+), 578 deletions(-) create mode 100644 autosklearn/ensemble_building/run.py diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 13044e399d..30dda7af6a 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -10,9 +10,6 @@ import shutil import time import traceback -import zlib -from dataclasses import dataclass -from functools import reduce from itertools import accumulate from pathlib import Path @@ -25,139 +22,10 @@ from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.ensembles.ensemble_selection import EnsembleSelection from autosklearn.metrics import Scorer, calculate_loss, calculate_score -from autosklearn.util.disk import sizeof -from autosklearn.util.functional import bound, findwhere, intersection, itersplit +from autosklearn.util.functional import bound, cut, intersection, split_by, value_split from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules - -Y_ENSEMBLE = 0 -Y_VALID = 1 -Y_TEST = 2 - - -RunID = tuple[int, int, float] - - -@dataclass -class Run: - """Dataclass for storing information about a run""" - - seed: int - num_run: int - ens_file: str - dir: Path - budget: float = 0.0 - loss: float | None = None - _mem_usage: int | None = None - # The recorded time of ensemble/test/valid predictions modified - recorded_mtimes: dict[str, float] = 0 - # Lazy keys so far: - # 0 - not loaded - # 1 - loaded and in memory - # 2 - loaded but dropped again - # 3 - deleted from disk due to space constraints - loaded: int = 0 - 
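A small worked example of the `value_split` helper added above, as it is defined at this point in the series (illustrative only, using plain floats in place of run losses):

    from autosklearn.util.functional import value_split

    losses = [0.10, 0.12, 0.20, 0.35, 0.48]  # already sorted, best first
    best, dummy = 0.10, 0.50

    keep, drop = value_split(losses, low=best, high=dummy, at=0.5, sort=False)
    # pivot = (1 - 0.5) * 0.10 + 0.5 * 0.50 = 0.30
    # keep == [0.10, 0.12, 0.20], drop == [0.35, 0.48]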
- @property - def mem_usage(self) -> float: - """The memory usage of this run based on it's directory""" - if self._mem_usage is None: - self._mem_usage = round(sizeof(self.dir, unit="MB"), 2) - - return self._mem_usage - - def is_dummy(self) -> bool: - """Whether this run is a dummy run or not""" - return self.num_run == 1 - - def pred_modified(self, kind: Literal["ensemble", "valid", "test"]) -> bool: - """Query for when the ens file was last modified""" - if self.recorded_mtimes is None: - raise RuntimeError("No times were recorded, use `record_modified_times`") - - if kind not in self.recorded_mtimes: - raise ValueError(f"Run has no recorded time for {kind}: {self}") - - recorded = self.recorded_mtimes[kind] - last = self.pred_path(kind).stat().st_mtime - - return recorded == last - - def pred_path(self, kind: Literal["ensemble", "valid", "test"]) -> Path: - """Get the path to certain predictions""" - fname = f"predictions_{kind}_{self.seed}_{self.num_run}_{self.budget}.npy" - return self.dir / fname - - def record_modified_times(self) -> None: - """Records the last time each prediction file type was modified, if it exists""" - for kind in ["ensemble", "valid", "test"]: - path = self.pred_path(kind) - if path.exists(): - self.recorded_mtimes[kind] = path.stat().st_mtime() - - def predictions( - self, - kind: Literal["ensemble", "valid", "test"], - precision: type | None = None, - ) -> Path: - """Load the predictions for this run - - Parameters - ---------- - kind : Literal["ensemble", "valid", "test"] - The kind of predictions to load - - precisions : type | None = None - What kind of precision reduction to apply - - Returns - ------- - np.ndarray - The loaded predictions - """ - path = self.pred_path(kind) - - with path.open("rb") as f: - # TODO: We should probably remove this requirement. 
I'm not sure why model - # predictions are being saved as pickled - predictions = np.load(f, allow_pickle=True) - - dtypes = {16: np.float16, 32: np.float32, 64: np.float64} - dtype = dtypes.get(precision, predictions.dtype) - predictions = predictions.astype(dtype=dtype, copy=False) - - return predictions - - @property - def id(self) -> RunID: - """Get the three components of it's id""" - return self.seed, self.num_run, self.budget - - def __str__(self) -> str: - return f"{self.seed}_{self.num_run}_{self.budget}" - - def __hash__(self) -> int: - return hash(self.id) - - @staticmethod - def from_dir(dir: Path) -> Run: - """Creates a Run from a path point to the directory of a run - - Parameters - ---------- - dir: Path - Expects something like /path/to/{seed}_{numrun}_budget - - Returns - ------- - Run - The run object generated from the directory - """ - name = dir.name - seed, num_run, budget = name.split("_") - run = Run(seed=seed, num_run=num_run, budget=budget, dir=dir) - run.record_modified_times() - return run +from autosklearn.ensemble_building.run import Run, RunID class EnsembleBuilder: @@ -296,85 +164,36 @@ def __init__( self._y_test: np.ndarray | None = datamanager.data.get("Y_test", None) self._y_ensemble: np.ndarray | None = None - # max_resident_models keeps the maximum number of models in disc - # Calculated during `main` - self.max_resident_models: int | None = None - - # Cached items, loaded by properties - # Check the corresponing properties for descriptions - self._run_prediction_paths: list[str] | None = None - self._run_predictions: dict[str, dict[int, np.ndarray]] | None = None - self._last_hash: str | None = None - self._runs: dict[str, Run] | None = None - - @property - def run_predictions_path(self) -> Path: - """Path to the cached predictions we store between runs""" - return Path(self.backend.internals_directory) / "ensemble_read_preds.pkl" - @property def runs_path(self) -> Path: """Path to the cached losses we store between runs""" return Path(self.backend.internals_directory) / "ensemble_read_losses.pkl" - @property - def run_predictions(self) -> dict[str, dict[int, np.ndarray]]: - """Get the cached predictions from previous runs - { - "file_name": { - Y_ENSEMBLE: np.ndarray - Y_VALID: np.ndarray - Y_TEST: np.ndarray - } - } - """ - if self._run_predictions is None: - self._run_predictions = {} - self._last_hash = "" - - path = self.run_predictions_path - if path.exists(): - with path.open("rb") as memory: - self._run_predictions, self._last_hash = pickle.load(memory) - - return self._run_predictions - - @property - def last_hash(self) -> str: - """Get the last hash associated with the run predictions""" - if self._last_hash is None: - self._run_predictions = {} - self._last_hash = "" - - path = self.run_predictions_path - if path.exists(): - with path.open("rb") as memory: - self._run_predictions, self._last_hash = pickle.load(memory) - - return self._last_hash - - @property - def runs(self) -> list[Run]: - """Get the cached information from previous runs""" - if self._runs is None: - # First read in all the runs on disk - runs_dir = Path(self.backend.get_runs_directory()) - all_runs = [Run.from_dir(dir) for dir in runs_dir.iterdir()] - - # Next, get the info about runs from last EnsembleBulder run, if any - loaded_runs: dict[RunID, Run] = {} - if self.runs_path.exists(): - with self.runs_path.open("rb") as memory: - loaded_runs = pickle.load(memory) + def previous_candidates(self) -> dict[RunID, Run]: + """Load any previous candidates that were saved from 
previous runs - # Update any run that was loaded but we didn't have previously - for run in all_runs: - if run.id not in loaded_runs: - loaded_runs[run.id] = run + Returns + ------- + dict[RunID, Run] + A dictionary from RunId's to the previous candidates + """ + if self.runs_path.exists(): + with self.runs_path.open("rb") as f: + return pickle.load(f) + else: + return {} - self._runs = loaded_runs + def available_runs(self) -> dict[RunID, Run]: + """Get a dictionary of all available runs on the filesystem - return list(self._runs.values()) + Returns + ------- + dict[RunID, Run] + A dictionary from RunId's to the available runs + """ + runs_dir = Path(self.backend.get_runs_directory()) + runs = [Run.from_dir(dir) for dir in runs_dir.iterdir()] + return {run.id: run for run in runs} def targets(self, kind: Literal["ensemble", "valid", "test"]) -> np.ndarray | None: """The ensemble targets used for training the ensemble @@ -580,71 +399,47 @@ def main( self.logger.debug(f"No targets for ensemble: {traceback.format_exc()}") return self.ensemble_history, self.ensemble_nbest, None, None, None - # Get our runs - runs = self.runs + # Load in information from previous candidates and also runs + runs = self.available_runs() - # No runs found, exit early - if len(self.runs) == 0: - self.logger.debug("Found no predictions on ensemble data set") - return self.ensemble_history, self.ensemble_nbest, None, None, None + # Update runs with information of available previous candidates + previous_candidates = self.previous_candidates() + runs.update(previous_candidates) - # We filter out all runs that don't have any predictions for the ensemble - with_predictions, without_predictions = itersplit( - runs, func=lambda r: r.pred_path("ensemble").exists() - ) + # We just need the values now, not the key value pairs {run.id: Run} + runs = list(runs.values()) - if len(without_predictions) > 0: - self.logger.warn(f"Have no ensemble predictions for {without_predictions}") - - runs = with_predictions - - # Calculating losses - # - # We need to calculate the loss of runs for which we have not done so yet. - # To do so, we first filter out runs that already have a loss - # and have not had their predictions modified. - # - # We then compute the losses for the runs remaining, sorted by their - # last-modified time, such that oldest are computed first. We only compute - # `self.read_at_most` of them, if specified, to ensure we don't spend too much - # time reading and computing losses. 
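Roughly how the two lookups above are combined in `main` (a sketch, assuming `builder` is an `EnsembleBuilder` instance): cached candidates take precedence over freshly scanned runs, so their previously computed losses are kept.

    previous = builder.previous_candidates()  # {run.id: Run} from the cached pickle, if any
    available = builder.available_runs()      # {run.id: Run} scanned from the runs directory

    runs = {**available, **previous}          # cached entries win, keeping their losses
    runs = list(runs.values())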
- # - # Filter runs that need their losses computed - runs_to_compute_loss = [] - for run in runs: - if run.loss is None or run.loss == np.inf: - runs_to_compute_loss.append(run) + if len(runs) == 0: + self.logger.debug("Found no runs") + return self.ensemble_history, self.ensemble_nbest, None, None, None - elif run.loss is not None and run.pred_modified("ensemble"): - self.logger.debug(f"{run.id} had its predictions modified?") - run.record_modified_times() # re-mark modfied times - runs_to_compute_loss.append(run) - - # Sort by last modified - by_last_modified = lambda r: r.record_mtimes["ensemble"] - runs_to_compute_loss = sorted(runs_to_compute_loss, key=by_last_modified) - - # Limit them if needed - if self.read_at_most is not None: - runs_to_compute_loss = runs_to_compute_loss[: self.read_at_most] - - # Calculate their losses - ensemble_targets = self.targets("ensemble") - for run in runs_to_compute_loss: - loss = self.run_loss(run, targets=ensemble_targets, kind="ensemble") - run.loaded = 2 - run.loss = loss - - n_read_total = sum(run.loaded > 0 for run in runs) - self.logger.debug( - f"Done reading {len(runs_to_compute_loss)} new prediction files." - f"Loaded {n_read_total} predictions in total." + # Calculate the loss for those that require it + requires_update = self.requires_loss_update(runs, limit=self.read_at_most) + for run in requires_update: + run.loss = self.loss(run, kind="ensemble") + + # Decide if self.max_models_on_disk is an + if isinstance(self.max_models_on_disc, int): + max_models_on_disk = self.max_models_on_disc, + memory_limit = None + elif isinstance(self.max_models_on_disc, float): + max_models_on_disk = None + memory_limit = self.max_models_on_disc + else: + max_models_on_disk = None + memory_limit = None + + candidates, discarded = self.candidates( + runs=runs, + better_than_dummy=True, + nbest=self.ensemble_nbest, + max_models_on_disk=max_models_on_disk, + memory_limit=memory_limit, + performance_range_threshold=self.performance_range_threshold ) - # Only the models with the n_best predictions are candidates - # to be in the ensemble - candidates = self.get_nbest() if len(candidates) == 0: + self.logger.debug("No viable candidates found for ensemble building") return self.ensemble_history, self.ensemble_nbest, None, None, None # Get a set representation of them as we will begin doing intersections @@ -660,54 +455,64 @@ def main( # Find the intersect between the most groups and use that to fit the ensemble intersect = intersection(candidates_set, valid_set, test_set) if len(intersect) > 0: - candidate_models, valid_models, test_models = sorted(list(intersect)) + candidate_models = sorted(list(intersect)) + valid_models = candidate_models + test_models = candidate_models elif len(candidates_set & valid_set) > 0: - candidate_models, valid_models = sorted(list(candidates_set & valid_set)) + candidate_models = sorted(list(candidates_set & valid_set)) + valid_models = candidate_models test_models = [] elif len(candidates_set & test_set) > 0: - candidate_models, test_models = sorted(list(candidates_set & test_set)) + candidate_models = sorted(list(candidates_set & test_set)) valid_models = [] + test_models = candidate_models # This has to be the case else: + candidate_models = sorted(list(candidates_set)) test_models = [] valid_models = [] - # train ensemble - ensemble = self.fit_ensemble(selected_keys=candidate_models) + # To save on pickle and to allow for fresh predictions, unload the cache + # before pickling + for run in candidate_models: + run.unload_cache() 
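How `max_models_on_disc` is translated into the `candidates()` arguments above can be sketched as follows (illustrative only, with a hypothetical configured value): an `int` is treated as an absolute model count, a `float` as a memory budget in MB.

    max_models_on_disc = 2048.0  # hypothetical configured value

    if isinstance(max_models_on_disc, int):
        max_models_on_disk, memory_limit = max_models_on_disc, None
    elif isinstance(max_models_on_disc, float):
        max_models_on_disk, memory_limit = None, max_models_on_disc
    else:
        max_models_on_disk, memory_limit = None, None

    # Here max_models_on_disk is None and memory_limit == 2048.0 MB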
- # Save the ensemble for later use in the main auto-sklearn module! - if ensemble is not None: - self.logger.info(ensemble) - - ens_perf = ensemble.get_validation_performance() - self.validation_performance_ = min(self.validation_performance_, ens_perf) - self.backend.save_ensemble(ensemble, iteration, self.seed) - - # Delete files of non-candidate models - can only be done after fitting the - # ensemble and saving it to disc so we do not accidentally delete models in - # the previous ensemble - if self.max_resident_models is not None: - to_delete = set(runs) - set(candidate_models) - to_delete = {r for r in to_delete if not r.is_dummy()} - for run in to_delete: - try: - shutil.rmtree(run.dir) - self.logger.info(f"Deleted files for {run}") - except Exception as e: - self.logger.error(f"Failed to delete files for {run}: \n{e}") - finally: - run.mem_usage = None - run.loaded = 3 - run.loss = np.inf - - # Save the read losses status for the next iteration, we should do this - # before doing predictions as this is a likely place of memory issues + # Save the candidates for the next round with self.runs_path.open("wb") as f: - pickle.dump(self.runs, f) + pickle.dump({run.id: run for run in candidate_models}, f) + # If there was any change from the previous run, either in terms of + # runs or one of those runs had its loss updated, then we need to + # fit the ensemble builder + previous_candidate_ids = set(previous_candidates.keys()) + current_candidate_ids = set(run.id for run in candidate_models) + if ( + len(previous_candidate_ids ^ current_candidate_ids) > 0 + or any(run in candidate_models for run in requires_update) + ): + ensemble = self.fit_ensemble(selected_keys=candidate_models) + if ensemble is not None: + self.logger.info(ensemble) + ens_perf = ensemble.get_validation_performance() + self.validation_performance_ = min( + self.validation_performance_, ens_perf + ) + self.backend.save_ensemble(ensemble, iteration, self.seed) + + # Delete files for models which were not considered candidates + if len(discarded) > 0: + for run in discarded: + if not run.is_dummy(): + try: + shutil.rmtree(run.dir) + self.logger.info(f"Deleted files for {run}") + except Exception as e: + self.logger.error(f"Failed to delete files for {run}: \n{e}") + + # Continue with evaluating the ensemble after making some space if ensemble is not None: performance_stamp = {"Timestamp": pd.Timestamp.now()} @@ -716,41 +521,37 @@ def main( ("valid", "val", valid_models), ("test", "test", test_models), ]: - if len(candidate_models) != len(models): - self.logger.info( - "Found inconsistent number of predictions and models" - f" ({len(candidate_models)} vs {len(models)}) for subset {kind}" - ) - else: - run_preds = [ - r.predictions(kind, precision=self.precision) for r in models - ] - pred = ensemble.predict(run_preds) - - # Pretty sure this whole step is uneeded but left over and afraid - # to touch - if self.task_type == BINARY_CLASSIFICATION: - pred = pred[:, 1] - - if pred.ndim == 1 or pred.shape[1] == 1: - pred = np.vstack( - ((1 - pred).reshape((1, -1)), pred.reshape((1, -1))) - ).transpose() - - score = calculate_score( - solution=self.targets(kind), - prediction=pred, - task_type=self.task_type, - metric=self.metric, - scoring_functions=None, - ) - performance_stamp[f"ensemble_{score_name}_score"] = score - - # The loaded predictions and hash can only be saved after the ensemble has been - # built, because the hash is computed during the construction of the ensemble - with self.run_predictions_path.open("wb") as f: 
- item = (self.run_predictions, self.last_hash) - pickle.dump(item, f) + if len(models) == 0: + continue + + targets = self.targets(kind) + if targets is None: + self.logger.warning(f"No ensemble targets for {kind}") + continue + + run_preds = [ + r.predictions(kind, precision=self.precision) for r in models + ] + pred = ensemble.predict(run_preds) + + # Pretty sure this whole step is uneeded but left over and afraid + # to touch + if self.task_type == BINARY_CLASSIFICATION: + pred = pred[:, 1] + + if pred.ndim == 1 or pred.shape[1] == 1: + pred = np.vstack( + ((1 - pred).reshape((1, -1)), pred.reshape((1, -1))) + ).transpose() + + score = calculate_score( + solution=targets, + prediction=pred, + task_type=self.task_type, + metric=self.metric, + scoring_functions=None, + ) + performance_stamp[f"ensemble_{score_name}_score"] = score if return_predictions: return ( @@ -763,232 +564,242 @@ def main( else: return self.ensemble_history, self.ensemble_nbest, None, None, None - def run_loss( + def step_memory_split( self, - run: Run, - targets: np.ndarray, - kind: Literal["ensemble", "val", "test"] = "ensemble", - ) -> None: - """Compute the loss of a run on a given set of targets - - NOTE - ---- - Still has a side effect of populating self.read_preds + runs: Sequence[Run], + limit: float, + sort: bool = True, + ) -> tuple[list[Run], list[Run]]: + """Split runs into Parameters ---------- - run: Run - The run to calculate the loss of + runs : Sequence[Run] + The runs to consider - targets: np.ndarray - The targets for which to calculate the losses on. - Typically the ensemble_targts. + limit : float + The memory limit in MB - targets: np.ndarray - The targets to compare against + Returns + ------- + (keep: list[Run], discarded: list[Run]) + """ + largest = max(runs, key=lambda r: r.mem_usage) + cutoff = limit - largest.mem_usage + + # Sort by loss and num run + if sort: + runs = sorted(runs, lambda r: (r.loss, r.num_run)) + + runs_with_acc_mem = zip(runs, accumulate(run.mem_usage for run in runs)) + candidates, discarded = cut(runs_with_acc_mem, at=lambda r: r[1] >= cutoff) + + self.logger.warning( + f"Limiting num of models via `memory_limit` float" + f" memory_limit={limit}" + f" cutoff={cutoff}" + f" largest={largest.mem_usage}" + f" remaining={len(candidates)}" + f" discarded={len(discarded)}" + ) - kind: "ensemble" | "val" | "test" = "ensemble" - What kind of predicitons to laod from the Runs + return candidates, discarded + def requires_loss_update(self, runs: Sequence[Run], limit: int | None) -> list[Run]: """ - # Put an entry in for the predictions if it doesn't exist - if run.id not in self.run_predictions: - self.run_predictions[run.id] = { - Y_ENSEMBLE: None, - Y_VALID: None, - Y_TEST: None, - } - try: - run_predictions = run.predictions("ensemble", precision=self.precision) - loss = calculate_loss( - solution=targets, - prediction=run_predictions, - task_type=self.task_type, - metric=self.metric, - scoring_functions=None, - ) + Parameters + ---------- + runs : Sequence[Run] + The runs to process - except Exception: - self.logger.error( - f"Error {kind} predictions for {run}:" f" {traceback.format_exc()}" - ) - loss = np.inf + Returns + ------- + list[Run] + The runs that require a loss to be calculated + """ + queue = [] + for run in runs: + if run.loss is None or run.loss == np.inf: + queue.append(run) - finally: - run.loss = loss - run.loaded = 2 + elif run.loss is not None and run.pred_modified("ensemble"): + self.logger.debug(f"{run.id} had its predictions modified?") + 
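+                # refresh the recorded mtimes so the same change does not re-queue this run on the next iteration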
run.record_modified_times() # re-mark modfied times + queue.append(run) + + if limit is not None: + return queue[:limit] + else: + return queue - def get_nbest( + def candidates( self, runs: Sequence[Run], - nbest: int | None = None, - ) -> list[Run]: - """Get best n predictions according to the loss on the "ensemble set" + *, + better_than_dummy: bool = False, + nbest: int | float | None = None, + max_models_on_disk: int | None = None, + memory_limit: float | None = None, + performance_range_threshold: float | None = None, + ) -> tuple[list[run], list[run]]: + """Get a list of candidates from `runs` + + Applies a set of reductions in order of parameters to reach a set of final + candidates. + + Expects at least one `dummy` run in `runs`. + + Parameters + ---------- + runs : Sequence[Run] + The runs to evaluate candidates from. + + better_than_dummy: bool = False + Whether the run must be better than the best dummy run to be a candidate. + In the case where there are no candidates left, the dummies will then be + used. + + nbest : int | float | None + The nbest models to select. If `int`, acts as an absolute limit. + If `float`, acts as a percentage of available candidates. - Side effects: - * Define the n-best models to use in ensemble - * Only the best models are loaded - * Any model that is not best is deletable if max models in disc is exceeded. + max_models_on_disk : int | None + The maximum amount of models allowed on disk. If the number of candidates + exceed this limit after previous filters applied, this will further + reduce the candidates. + + memory_limit : float | None + A maximum memory limit in MB for the runs to occupy. If the candidates at + this point exceed this limit, the best n candidates that fit into this limit + will be chosen. + + performance_range_threshold : float | None + A number in (0, 1) to select candidates from. Expects a dummy run for worst Returns ------- - list[str] - Returns the paths of the selected models which are used as keys in - `run_predictions` and `runs` + (candidates: list[Run], discarded: list[Run]) + A tuple of runs that are candidates and also those that didn't make it """ - if nbest is None: - nbest = self.ensemble_nbest - - # Getting the candidates - # - # First we must split out dummy runs and real runs. We sort the dummy - # runs to then remove any real ones that are worse than the best dummy. - # If this removes all viable candidates, then we reinclude dummy runs - # as being viable candidates. - # - dummies, real = itersplit(runs, func=lambda r: r.is_dummy()) + all_discarded: set[Run] = {} - if len(dummies) == 0: - raise ValueError("We always expect a dummy run, i.e. 
a run with num_run=1") + # We filter out all runs that don't have any predictions for the ensemble + has_predictions = lambda run: run.pred_path("ensemble").exists() + candidates, discarded = split_by(runs, by=has_predictions) + all_discarded.update(discarded) - dummy_loss = sorted(dummies)[0].loss - self.logger.debug(f"Using {dummy_loss} to filter candidates") + if len(candidates) == 0: + self.logger.debug("No runs with predictions on ensemble data set") + return candidates, discarded - candidates = [r for r in real if r.loss < dummy_loss] + if len(discarded) > 0: + self.logger.warn(f"Have no ensemble predictions for {discarded}") + + # Get all the ones that have a tangible loss + candidates, discarded = split_by( + candidates, + lambda r: r.loss is not None and r.loss < np.inf, + ) + all_discarded.update(discarded) - # If there are no candidates left, use the dummies if len(candidates) == 0: - if len(real) > len(dummies): - self.logger.warning( - "No models better than random - using Dummy loss!" - f"\n\tNumber of models besides current dummy model: {len(real)}" - f"\n\tNumber of dummy models: {len(dummies)}", - ) + self.logger.debug("No runs with a usable loss") + return candidates, all_discarded - candidates = [d for d in dummies if d.seed == self.seed] + # Further split the candidates into those that are real and dummies + dummies, real = split_by(candidates, by=lambda r: r.is_dummy()) + dummies = sorted(dummies, key=lambda r: r.loss) + dummy_cutoff = dummies[0].loss - # Sort the candidates by lowest loss first and then lowest numrun going forward - candidates = sorted(candidates, key=lambda r: (r.loss, r.num_run)) + if len(dummies) == 0: + self.logger.error("Expected at least one dummy run") + raise RuntimeError("Expected at least one dummy run") - # Calculate `keep_nbest` to determine how many models to keep - # - # 1. First we use the parameter `ensemble_nbest` to determine a base - # size of how many to keep, `int` being absolute and float being - # percentage of the available candidates. - # - # 2. If `max_models_on_disc` was an int, we can take this to be absolute. - # Otherwise, we take it to be a memory *cutoff*. We also add some buffer - # to the *cutoff*, essentially giving us that the *cutoff* is - # - # cutoff = max_models_on_disc - size_of_largest_model - # - # We use the fact models are sorted based on loss, from best to worst, - # and we calculate the cumulative memory cost. From this, we determine - # how many of the best models we can keep before we go over this *cutoff*. - # This is called the `max_resident_models`. - # - # 3. Finally, we take the smaller of the two from step 1. and 2. 
to determine - # the amount of models to keep - # - # Use `ensemble_n_best` - n_candidates = len(candidates) - if isinstance(self.ensemble_nbest, int): - keep_nbest = min(self.ensemble_nbest, n_candidates) - else: - val = n_candidates * self.ensemble_nbest - keep_nbest = int(bound(val, low=1, high=n_candidates)) + if len(real) == 0: + self.logger.warnings("No real runs, using dummies as candidates") + candidates = dummies + return candidates, all_discarded - percent = keep_nbest / n_candidates - self.logger.debug(f"Using top {keep_nbest} of {n_candidates} ({percent:.2%})") + if better_than_dummy: + self.logger.debug(f"Using {dummy_cutoff} to filter candidates") - # Determine `max_resident_models` - self.max_resident_models = self.max_models_on_disc - if isinstance(self.max_resident_models, float): - largest_mem = max(candidates, key=lambda r: r.mem_usage) - cutoff = self.max_models_on_disc - largest_mem + candidates, discarded = split_by(real, by=lambda r: r.loss < dummy_cutoff) + all_discarded.update(discarded) - total = sum(r.mem_usage for r in candidates) - if total <= cutoff: - self.max_resident_models = None - else: - # Index of how many models before we go over the cutoff - mem_usage_for_n_models = accumulate(r.mem_usage for r in candidates) - max_models = findwhere( - mem_usage_for_n_models, - lambda cost: cost > cutoff, - default=len(candidates), - ) + # If there are no real candidates left, use the dummies + if len(candidates) == 0: + candidates = dummies + if len(real) > 0: + self.logger.warning( + "No models better than random - using Dummy loss!" + f"\n\tNumber of models besides current dummy model: {len(real)}" + f"\n\tNumber of dummy models: {len(dummies)}", + ) - # Ensure we always at least have 1, even if the very first - # model would have put us over the cutoff - self.max_resident_models = max(1, max_models) + n_candidates = len(candidates) - self.logger.warning( - f"Limiting num of models via `max_models_on_disc` float" - f" max_models_on_disc={self.max_models_on_disc}" - f" cutoff={cutoff}" - f" worst={largest_mem}" - f" num_models={self.max_resident_models}" - ) + # Decide how many instanceto keep + nkeep: int | None + if isinstance(nbest, float): + nkeep = int(bound(n_candidates * nbest, bounds=(1, n_candidates))) + else: + nkeep = nbest + + if nkeep is None and max_models_on_disk is not None: + nkeep = max_models_on_disk + elif nkeep is not None and max_models_on_disk < nkeep: + self.logger.warning( + f"Limiting {n_candidates} by `max_models_on_disk={max_models_on_disk}`" + f"instead of {nkeep} (set from `nbest={nbest}`)" + ) + nkeep = max_models_on_disk + else: + nkeep = nkeep - if ( - self.max_resident_models is not None - and self.max_resident_models < keep_nbest - ): - self.logger.debug( - f"Restricting the number of models to {self.max_resident_models}" - f"instead of {keep_nbest} due to argument " + # Sort the candidates so that they ordered by best loss, using num_run for tie + candidates = sorted(candidates, key=lambda r: (r.loss, r.num_run)) + + # If we need to specify how many to keep, keep that many + if nkeep is not None: + candidates, discarded = cut(candidates, at=nkeep) + all_discarded.update(discarded) + self.logger.info(f"Discarding {len(discarded)}/{n_candidates} runs") + + # Choose which ones to discard if there's a memory limit + if memory_limit is not None: + candidates, discarded = self.memory_split( + runs=candidates, + limit=memory_limit, + sort=False, # Already sorted ) - keep_nbest = self.max_resident_models - - # consider 
performance_range_threshold - # - # - if self.performance_range_threshold > 0: - best = runs[0].loss - cutoff = dummy_loss - (dummy_loss - best) * self.performance_range_threshold - - considered = candidates[:keep_nbest] - if considered[-1].loss > cutoff: - # Find the first run that is worse than the cutoff - cutoff_run_idx = findwhere( - considered, - lambda r: r.loss >= cutoff, - default=len(considered), - ) + all_discarded.update(discarded) + + if performance_range_threshold is not None: + high = dummies[0].loss + low = candidates[0].loss + candidates, discarded = value_split( + candidates, + high=high, + low=low, + at=performance_range_threshold, + key=lambda run: run.loss, + sort=False, # Already sorted + ) + all_discarded.update(discarded) + + return candidates, all_discarded - # Make sure we always keep at least 1 - keep_nbest = max(1, cutoff_run_idx) - - keep, unload = candidates[:keep_nbest], candidates[keep_nbest:] - - # remove loaded predictions for non-winning models - for run in unload: - if run.id in self.run_predictions: - self.run_predictions[run.id][Y_ENSEMBLE] = None - self.run_predictions[run.id][Y_VALID] = None - self.run_predictions[run.id][Y_TEST] = None - - if run.loaded == 1: - self.logger.debug(f"Dropping model {run}") - run.loaded = 2 - - # Load the predictions for the winning - for run in keep: - if run.loaded != 3 and ( - run.id not in self.run_predictions - or self.run_predictions[run.id][Y_ENSEMBLE] is None - ): - # No need to load valid and test here because they are loaded only if - # the model ends up in the ensemble - predictions = run.predictions("ensemble", precision=self.precision) - self.run_predictions[run.id][Y_ENSEMBLE] = predictions - run.loaded = 1 - - return keep - - def fit_ensemble(self, selected_runs: list[Run]) -> EnsembleSelection: + def fit_ensemble( + self, + runs: list[Run], + size: int | None = None, + task: int | None = None, + metric: Scorer | None = None, + precision: type | None = None, + targets: np.ndarray | None = None, + random_state: int | np.random.RandomState | None = None + ) -> EnsembleSelection: """TODO Parameters @@ -1001,49 +812,70 @@ def fit_ensemble(self, selected_runs: list[Run]) -> EnsembleSelection: ensemble: EnsembleSelection The trained ensemble """ - # List of (seed, num_run, budget) - include_num_runs = [run.id for run in selected_runs] - - # Compute hash based on the run ids and when they were last modified - hash_components = [ - hash(r.id, r.record_mtimes["ensemble"]) for r in selected_runs - ] - current_hash = reduce(lambda a, b: a ^ b, hash_components) - - if self.last_hash == current_hash: - self.logger.debug( - "No new model predictions selected -- skip ensemble building " - f"-- current performance: {self.validation_performance_}", - ) - return None - - predictions_train = [ - run.predictions("ensemble", precision=self.precision) - for run in selected_runs - ] - - self._last_hash = current_hash + task = task if task is not None else self.task_type + size = size if size is not None else self.ensemble_size + metric = metric if metric is not None else self.metric + rs = random_state if random_state is not None else self.random_state ensemble = EnsembleSelection( - ensemble_size=self.ensemble_size, - task_type=self.task_type, - metric=self.metric, - random_state=self.random_state, + ensemble_size=size, + task_type=task, + metric=metric, + random_state=rs, ) - self.logger.debug(f"Fitting ensemble on {len(predictions_train)} models") + self.logger.debug(f"Fitting ensemble on {len(runs)} models") start_time = 
time.time() + try: + precision = precision if precision is not None else self.precision + predictions_train = [ + run.predictions("ensemble", precision=precision) + for run in runs + ] + + targets = targets if targets is not None else self.targets("ensemble") ensemble.fit( predictions=predictions_train, - labels=self.targets("ensemble"), - identifiers=include_num_runs, + labels=targets, + identifiers=[run.id for run in runs], ) except Exception as e: self.logger.error(f"Caught error {e}: {traceback.format_exc()}") - return None + ensemble = None + finally: + duration = time.time() - start_time + self.logger.debug(f"Fitting the ensemble took {duration} seconds.") + return ensemble - duration = time.time() - start_time - self.logger.debug(f"Fitting the ensemble took {duration} seconds.") + def loss( + self, + run: Run, + kind: Literal["ensemble", "valid", "test"] = "ensemble", + ) -> float: + """Calculate the loss for a list of runs - return ensemble + Parameters + ---------- + run: Run + The run to calculate the loss for + + Returns + ------- + float + The loss for the run + """ + try: + predictions = run.predictions(kind, precision=self.precision) + targets = self.targets(kind) + loss = calculate_loss( + solution=targets, + prediction=predictions, + task_type=self.task_type, + metric=self.metric, + ) + except Exception: + self.logger.error(f"Error getting loss for {run}: {traceback.format_exc()}") + loss = np.inf + finally: + return loss diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py new file mode 100644 index 0000000000..c6a9173b1e --- /dev/null +++ b/autosklearn/ensemble_building/run.py @@ -0,0 +1,138 @@ +from __future__ import annotations + +from typing import Any, Tuple +from typing_extensions import Literal + +from pathlib import Path + +import numpy as np + +from autosklearn.util.disk import sizeof + +RunID = Tuple[int, int, float] + + +class Run: + """Class for storing information about a run""" + + def __init__(self, path: Path) -> None: + """Creates a Run from a path point to the directory of a run + + Parameters + ---------- + path: Path + Expects something like /path/to/{seed}_{numrun}_{budget} + + Returns + ------- + Run + The run object generated from the directory + """ + name = path.name + seed, num_run, budget = name.split("_") + + self.dir = path + self.seed = int(seed) + self.num_run = int(num_run) + self.budget = float(budget) + + self.loss: float | None = None + self._mem_usage: float | None = None + + # Items that will be delete when the run is saved back to file + self._cache: dict[str, Any] = {} + + # The recorded time of ensemble/test/valid predictions modified + self.recorded_mtimes: dict[str, float] = {} + self.record_modified_times() + + @property + def mem_usage(self) -> float: + """The memory usage of this run based on it's directory""" + if self._mem_usage is None: + self._mem_usage = round(sizeof(self.dir, unit="MB"), 2) + + return self._mem_usage + + def is_dummy(self) -> bool: + """Whether this run is a dummy run or not""" + return self.num_run == 1 + + def pred_modified(self, kind: Literal["ensemble", "valid", "test"]) -> bool: + """Query for when the ens file was last modified""" + if kind not in self.recorded_mtimes: + raise ValueError(f"Run has no recorded time for {kind}: {self}") + + recorded = self.recorded_mtimes[kind] + last = self.pred_path(kind).stat().st_mtime + + return recorded == last + + def pred_path(self, kind: Literal["ensemble", "valid", "test"]) -> Path: + """Get the path to certain 
predictions""" + fname = f"predictions_{kind}_{self.seed}_{self.num_run}_{self.budget}.npy" + return self.dir / fname + + def record_modified_times(self) -> None: + """Records the last time each prediction file type was modified, if it exists""" + self.recorded_mtimes = {} + for kind in ["ensemble", "valid", "test"]: + path = self.pred_path(kind) # type: ignore + if path.exists(): + self.recorded_mtimes[kind] = path.stat().st_mtime + + def predictions( + self, + kind: Literal["ensemble", "valid", "test"], + precision: int | None = None, + ) -> Path: + """Load the predictions for this run + + Parameters + ---------- + kind : Literal["ensemble", "valid", "test"] + The kind of predictions to load + + precisions : type | None = None + What kind of precision reduction to apply + + Returns + ------- + np.ndarray + The loaded predictions + """ + key = f"predictions_{kind}" + if key in self._cache: + return self._cache[key] + + path = self.pred_path(kind) + + with path.open("rb") as f: + # TODO: We should probably remove this requirement. I'm not sure why model + # predictions are being saved as pickled + predictions = np.load(f, allow_pickle=True) + + if precision: + dtypes: dict[int, type] = {16: np.float16, 32: np.float32, 64: np.float64} + dtype = dtypes.get(precision, None) + + if dtype is not None: + predictions = predictions.astype(dtype=dtype, copy=False) + + self._cache[key] = predictions + return predictions + + def unload_cache(self) -> None: + """Removes the cache from this object""" + self._cache = {} + + @property + def id(self) -> RunID: + """Get the three components of it's id""" + return self.seed, self.num_run, self.budget + + def __hash__(self) -> int: + return hash(self.id) + + def __eq__(self, other: object) -> bool: + return isinstance(other, Run) and other.id == self.id diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index e855e2de26..467cf1e463 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -86,34 +86,92 @@ def intersection(*items: Iterable[T]) -> set[T]: return set(reduce(lambda s1, s2: set(s1) & set(s2), items, items[0])) -def itersplit(lst: Iterable[T], func: Callable[[T], bool]) -> tuple[list[T], list[T]]: - """Split a list in two based on a predicate +def cut( + lst: Iterable[T], + at: int | Callable[[T], bool], +) -> tuple[list[T], list[T]]: + """Cut a list in two at a given index or predicate Parameters ---------- lst : Iterable[T] - The list to split + An iterable of items - func : Callable[[T], bool] + at : int | Callable[[T], bool] + Where to split at, either an index or a predicate + + Returns + ------- + tuple[list[T], list[T]] + The split items + """ + if isinstance(at, int): + lst = list(lst) + return lst[:at], lst[at:] + else: + a = [] + itr = iter(lst) + for x in itr: + if not at(x): + a.append(x) + break + + return a, [x] + list(itr) + + +def split_by( + lst: Iterable[T], + by: Callable[[T], bool], + *, + split_at_first: bool = False, +) -> tuple[list[T], list[T]]: + """Split a list in two based on a predicate. + + Note + ---- + First element can not contain None + + Parameters + ---------- + lst : Iterable[T] + The iterator to split + + by : Callable[[T], bool] The predicate to split it on + split_at_first: bool = False + Whether to split at the first occurence of `func == True` + Returns ------- (a: list[T], b: list[T]) - Everything in a satisfies the func while nothing in b does + a is where the func is True and b is where the func was False. 
If using + `split_at_first = True`, b contains everything after the first + False occurence. """ a = [] b = [] - for x in lst: - if func(x): - a.append(x) - else: - b.append(x) + if split_at_first: + itr = iter(lst) + for x in itr: + if by(x): + a.append(x) + else: + break - return a, b + return a, list(itr) # Convert remaining to list + else: + for x in lst: + if by(x): + a.append(x) + else: + b.append(x) + + return a, b -def bound(val: float, *, low: float, high: float) -> float: + +def bound(val: float, bounds: tuple[float, float]) -> float: """Bounds a value between a low and high .. code:: python @@ -126,18 +184,15 @@ def bound(val: float, *, low: float, high: float) -> float: val : float The value to bound - low : float - The low to bound against - - high : float - The high to bound against + bounds: tuple[foat, float] + The bounds to bound the value between (low, high) Returns ------- float The bounded value """ - return max(low, min(val, high)) + return max(bounds[0], min(val, bounds[1])) def findwhere(itr: Iterable[T], func: Callable[[T], bool], *, default: int = -1) -> int: @@ -172,13 +227,24 @@ def value_split( at: float = 0.5, sort: bool = True, ) -> tuple[list[T], list[T]]: - """Split a list according to it's values. + """Split a list according to it's values at a certain percentage. + + Will attempt to sort the values unless specified that it should not `sort`. + The endpoints `low` and `high` are assumed to be the min and max of the sorted + `lst`. + + The value used for splitting is calculated by + + (1 - `at`) * low + `at` * high ..code:: python - # low at = 0.75 high - # -----|----------------|---------| - # 0 20 80 100 + # min low at=0.75 high/max + # |-----|----------------|---------| + # 0 20 80 100 + # + # [----------------------][++++++++] + # split 1 split 2 x = np.linspace(0, 100, 21) # [0, 5, 10, ..., 95, 100] @@ -226,9 +292,9 @@ def value_split( pivot_value = (1 - at) * low + (at) * high if key is None: - greater_than_pivot = (lambda x: x >= pivot_value) + greater_than_pivot = lambda x: x >= pivot_value else: - greater_than_pivot = (lambda x: key(x) >= pivot_value) + greater_than_pivot = lambda x: key(x) >= pivot_value pivot_idx = findwhere(lst, greater_than_pivot, default=len(lst)) From 5b8271c193b442c3f334eee75bfa14a82abac54a Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 8 Apr 2022 11:37:03 +0200 Subject: [PATCH 054/117] Add some docstring --- autosklearn/ensemble_building/run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index c6a9173b1e..bc3ebc2b66 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -13,7 +13,7 @@ class Run: - """Class for storing information about a run""" + """Class for storing information about a run used during ensemble building""" def __init__(self, path: Path) -> None: """Creates a Run from a path point to the directory of a run From c1496ceb8c4f153a757503b9316959f00cbeb8e5 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 8 Apr 2022 11:38:45 +0200 Subject: [PATCH 055/117] Formatting --- autosklearn/ensemble_building/builder.py | 16 +++++++--------- autosklearn/ensemble_building/manager.py | 6 ++++++ autosklearn/ensemble_building/run.py | 2 +- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 30dda7af6a..dcb9070ea1 100644 --- a/autosklearn/ensemble_building/builder.py +++ 
b/autosklearn/ensemble_building/builder.py @@ -20,12 +20,12 @@ from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION +from autosklearn.ensemble_building.run import Run, RunID from autosklearn.ensembles.ensemble_selection import EnsembleSelection from autosklearn.metrics import Scorer, calculate_loss, calculate_score from autosklearn.util.functional import bound, cut, intersection, split_by, value_split from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules -from autosklearn.ensemble_building.run import Run, RunID class EnsembleBuilder: @@ -420,7 +420,7 @@ def main( # Decide if self.max_models_on_disk is an if isinstance(self.max_models_on_disc, int): - max_models_on_disk = self.max_models_on_disc, + max_models_on_disk = (self.max_models_on_disc,) memory_limit = None elif isinstance(self.max_models_on_disc, float): max_models_on_disk = None @@ -435,7 +435,7 @@ def main( nbest=self.ensemble_nbest, max_models_on_disk=max_models_on_disk, memory_limit=memory_limit, - performance_range_threshold=self.performance_range_threshold + performance_range_threshold=self.performance_range_threshold, ) if len(candidates) == 0: @@ -489,9 +489,8 @@ def main( # fit the ensemble builder previous_candidate_ids = set(previous_candidates.keys()) current_candidate_ids = set(run.id for run in candidate_models) - if ( - len(previous_candidate_ids ^ current_candidate_ids) > 0 - or any(run in candidate_models for run in requires_update) + if len(previous_candidate_ids ^ current_candidate_ids) > 0 or any( + run in candidate_models for run in requires_update ): ensemble = self.fit_ensemble(selected_keys=candidate_models) if ensemble is not None: @@ -798,7 +797,7 @@ def fit_ensemble( metric: Scorer | None = None, precision: type | None = None, targets: np.ndarray | None = None, - random_state: int | np.random.RandomState | None = None + random_state: int | np.random.RandomState | None = None, ) -> EnsembleSelection: """TODO @@ -830,8 +829,7 @@ def fit_ensemble( try: precision = precision if precision is not None else self.precision predictions_train = [ - run.predictions("ensemble", precision=precision) - for run in runs + run.predictions("ensemble", precision=precision) for run in runs ] targets = targets if targets is not None else self.targets("ensemble") diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index 638b312c54..8e9a3c2aba 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -163,7 +163,13 @@ def build_ensemble( self, dask_client: dask.distributed.Client, ) -> None: + """Build the ensemble + Parameters + ---------- + dask_client: dask.distributed.Client + The dask client to use + """ # The second criteria is elapsed time elapsed_time = time.time() - self.start_time diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index bc3ebc2b66..05d55176e5 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -1,11 +1,11 @@ from __future__ import annotations from typing import Any, Tuple -from typing_extensions import Literal from pathlib import Path import numpy as np +from typing_extensions import Literal from autosklearn.util.disk import sizeof From 04c8b933d6758a386a2b0a24a40505c7b62bc062 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 8 Apr 2022 12:22:25 +0200 Subject: [PATCH 056/117] Fix type signature 
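For context, a minimal usage sketch of the corrected contract: `Run.predictions` returns an `np.ndarray` (optionally down-cast via `precision`), not a `Path`. The temporary directory, file name, and values below are illustrative only, and the import assumes this branch's `autosklearn.ensemble_building.run` module is installed.

    from pathlib import Path

    import numpy as np

    from autosklearn.ensemble_building.run import Run

    # Hypothetical run directory, laid out as {seed}_{num_run}_{budget}
    run_dir = Path("/tmp/0_2_50.0")
    run_dir.mkdir(parents=True, exist_ok=True)
    np.save(run_dir / "predictions_ensemble_0_2_50.0.npy", np.asarray([[0.1, 0.9]]))

    run = Run(run_dir)
    preds = run.predictions("ensemble", precision=32)  # ndarray cast to float32
    assert isinstance(preds, np.ndarray) and preds.dtype == np.float32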
--- autosklearn/ensemble_building/run.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index 05d55176e5..d3b76fb45d 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Tuple +from typing import Tuple from pathlib import Path @@ -40,7 +40,7 @@ def __init__(self, path: Path) -> None: self._mem_usage: float | None = None # Items that will be delete when the run is saved back to file - self._cache: dict[str, Any] = {} + self._cache: dict[str, np.ndarray] = {} # The recorded time of ensemble/test/valid predictions modified self.recorded_mtimes: dict[str, float] = {} @@ -85,7 +85,7 @@ def predictions( self, kind: Literal["ensemble", "valid", "test"], precision: int | None = None, - ) -> Path: + ) -> np.ndarray: """Load the predictions for this run Parameters From dc09d968f54e3445d0e0a1a05c52b26b8db2d995 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 8 Apr 2022 12:26:08 +0200 Subject: [PATCH 057/117] Fix typing for `loss` --- autosklearn/ensemble_building/builder.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index dcb9070ea1..89de63f65f 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Sequence +from typing import Any, Sequence, cast import logging.handlers import multiprocessing @@ -863,17 +863,21 @@ def loss( float The loss for the run """ + targets = self.targets(kind) + if targets is None: + self.logger.error(f"No targets of {kind}") + return np.inf + try: predictions = run.predictions(kind, precision=self.precision) - targets = self.targets(kind) - loss = calculate_loss( + loss: float = calculate_loss( # type: ignore solution=targets, prediction=predictions, task_type=self.task_type, metric=self.metric, ) - except Exception: - self.logger.error(f"Error getting loss for {run}: {traceback.format_exc()}") + except Exception as e: + self.logger.error(f"Error getting loss {run}:{e}{traceback.format_exc()}") loss = np.inf finally: return loss From 7f6b7d982c205136e4a9827eeefd06c6d2649749 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 8 Apr 2022 12:34:33 +0200 Subject: [PATCH 058/117] Removed Literal --- autosklearn/ensemble_building/builder.py | 19 +++++++++++-------- autosklearn/ensemble_building/run.py | 9 ++++----- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 89de63f65f..5669e5ec98 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -16,7 +16,6 @@ import numpy as np import pandas as pd import pynisher -from typing_extensions import Literal from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION @@ -195,7 +194,7 @@ def available_runs(self) -> dict[RunID, Run]: runs = [Run.from_dir(dir) for dir in runs_dir.iterdir()] return {run.id: run for run in runs} - def targets(self, kind: Literal["ensemble", "valid", "test"]) -> np.ndarray | None: + def targets(self, kind: str = "ensemble") -> np.ndarray | None: """The ensemble targets used for training the ensemble It will attempt to load and cache them in 
memory but @@ -492,7 +491,9 @@ def main( if len(previous_candidate_ids ^ current_candidate_ids) > 0 or any( run in candidate_models for run in requires_update ): - ensemble = self.fit_ensemble(selected_keys=candidate_models) + ensemble = self.fit_ensemble( + runs=candidate_models + ) if ensemble is not None: self.logger.info(ensemble) ens_perf = ensemble.get_validation_performance() @@ -792,13 +793,14 @@ def candidates( def fit_ensemble( self, runs: list[Run], + targets: np.ndarray, + *, size: int | None = None, task: int | None = None, metric: Scorer | None = None, - precision: type | None = None, - targets: np.ndarray | None = None, + precision: int | None = None, random_state: int | np.random.RandomState | None = None, - ) -> EnsembleSelection: + ) -> EnsembleSelection | None: """TODO Parameters @@ -816,6 +818,8 @@ def fit_ensemble( metric = metric if metric is not None else self.metric rs = random_state if random_state is not None else self.random_state + ensemble: EnsembleSelection | None + ensemble = EnsembleSelection( ensemble_size=size, task_type=task, @@ -832,7 +836,6 @@ def fit_ensemble( run.predictions("ensemble", precision=precision) for run in runs ] - targets = targets if targets is not None else self.targets("ensemble") ensemble.fit( predictions=predictions_train, labels=targets, @@ -849,7 +852,7 @@ def fit_ensemble( def loss( self, run: Run, - kind: Literal["ensemble", "valid", "test"] = "ensemble", + kind: str = "ensemble" ) -> float: """Calculate the loss for a list of runs diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index d3b76fb45d..d44e816f6a 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -5,7 +5,6 @@ from pathlib import Path import numpy as np -from typing_extensions import Literal from autosklearn.util.disk import sizeof @@ -58,7 +57,7 @@ def is_dummy(self) -> bool: """Whether this run is a dummy run or not""" return self.num_run == 1 - def pred_modified(self, kind: Literal["ensemble", "valid", "test"]) -> bool: + def pred_modified(self, kind: str) -> bool: """Query for when the ens file was last modified""" if kind not in self.recorded_mtimes: raise ValueError(f"Run has no recorded time for {kind}: {self}") @@ -68,7 +67,7 @@ def pred_modified(self, kind: Literal["ensemble", "valid", "test"]) -> bool: return recorded == last - def pred_path(self, kind: Literal["ensemble", "valid", "test"]) -> Path: + def pred_path(self, kind: str) -> Path: """Get the path to certain predictions""" fname = f"predictions_{kind}_{self.seed}_{self.num_run}_{self.budget}.npy" return self.dir / fname @@ -83,14 +82,14 @@ def record_modified_times(self) -> None: def predictions( self, - kind: Literal["ensemble", "valid", "test"], + kind: str, precision: int | None = None, ) -> np.ndarray: """Load the predictions for this run Parameters ---------- - kind : Literal["ensemble", "valid", "test"] + kind : "ensemble" | "test" | "valid" The kind of predictions to load precisions : type | None = None From f45e409551d8044783206d7624dcf9aac8d59a17 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 8 Apr 2022 14:22:06 +0200 Subject: [PATCH 059/117] Mypy fixes for ensemble builder --- autosklearn/ensemble_building/builder.py | 268 +++++++++++------------ autosklearn/ensemble_building/run.py | 2 +- autosklearn/util/functional.py | 126 +---------- pyproject.toml | 2 - 4 files changed, 145 insertions(+), 253 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py 
b/autosklearn/ensemble_building/builder.py index 5669e5ec98..c339610107 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -19,10 +19,17 @@ from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION +from autosklearn.data.xy_data_manager import XYDataManager from autosklearn.ensemble_building.run import Run, RunID from autosklearn.ensembles.ensemble_selection import EnsembleSelection from autosklearn.metrics import Scorer, calculate_loss, calculate_score -from autosklearn.util.functional import bound, cut, intersection, split_by, value_split +from autosklearn.util.functional import ( + bound, + cut, + findwhere, + intersection, + split, +) from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules @@ -91,7 +98,8 @@ def __init__( Keep only models that are better than: - dummy + (best - dummy) * performance_range_threshold + x = performance_range_threshold + x * dummy E.g dummy=2, best=4, thresh=0.5 --> only consider models with loss > 3 @@ -149,24 +157,25 @@ def __init__( self.performance_range_threshold = performance_range_threshold # The starting time of the procedure - self.start_time = 0 + self.start_time: float = 0.0 # Track the ensemble performance - self.ensemble_history = [] + self.ensemble_history: list = [] # Keep running knowledge of its validation performance self.validation_performance_ = np.inf # Data we may need - datamanager = self.backend.load_datamanager() + datamanager: XYDataManager = self.backend.load_datamanager() self._y_valid: np.ndarray | None = datamanager.data.get("Y_valid", None) self._y_test: np.ndarray | None = datamanager.data.get("Y_test", None) self._y_ensemble: np.ndarray | None = None @property - def runs_path(self) -> Path: + def previous_candidates_path(self) -> Path: """Path to the cached losses we store between runs""" - return Path(self.backend.internals_directory) / "ensemble_read_losses.pkl" + fname = "previous_ensemble_building_candidates.pkl" + return Path(self.backend.internals_directory) / fname def previous_candidates(self) -> dict[RunID, Run]: """Load any previous candidates that were saved from previous runs @@ -176,8 +185,8 @@ def previous_candidates(self) -> dict[RunID, Run]: dict[RunID, Run] A dictionary from RunId's to the previous candidates """ - if self.runs_path.exists(): - with self.runs_path.open("rb") as f: + if self.previous_candidates_path.exists(): + with self.previous_candidates_path.open("rb") as f: return pickle.load(f) else: return {} @@ -191,7 +200,7 @@ def available_runs(self) -> dict[RunID, Run]: A dictionary from RunId's to the available runs """ runs_dir = Path(self.backend.get_runs_directory()) - runs = [Run.from_dir(dir) for dir in runs_dir.iterdir()] + runs = [Run(path=dir) for dir in runs_dir.iterdir()] return {run.id: run for run in runs} def targets(self, kind: str = "ensemble") -> np.ndarray | None: @@ -229,7 +238,7 @@ def run( return_predictions: bool = False, ) -> tuple[ list[dict[str, Any]], - int, + int | float, np.ndarray | None, np.ndarray | None, np.ndarray | None, @@ -282,6 +291,7 @@ def run( time_elapsed = time.time() - process_start_time time_left -= time_elapsed else: + assert end_at is not None current_time = time.time() if current_time > end_at: break @@ -309,7 +319,7 @@ def run( # ATTENTION: main will start from scratch; # all data structures are empty again try: - self.run_predictions_path.unlink() + 
self.previous_candidates_path.unlink() except: # noqa E722 pass @@ -357,7 +367,7 @@ def main( return_predictions: bool = False, ) -> tuple[ list[dict[str, Any]], - int, + int | float, np.ndarray | None, np.ndarray | None, np.ndarray | None, @@ -399,14 +409,14 @@ def main( return self.ensemble_history, self.ensemble_nbest, None, None, None # Load in information from previous candidates and also runs - runs = self.available_runs() + available_runs = self.available_runs() # Update runs with information of available previous candidates previous_candidates = self.previous_candidates() - runs.update(previous_candidates) + available_runs.update(previous_candidates) # We just need the values now, not the key value pairs {run.id: Run} - runs = list(runs.values()) + runs = list(available_runs.values()) if len(runs) == 0: self.logger.debug("Found no runs") @@ -419,7 +429,7 @@ def main( # Decide if self.max_models_on_disk is an if isinstance(self.max_models_on_disc, int): - max_models_on_disk = (self.max_models_on_disc,) + max_models_on_disk = self.max_models_on_disc memory_limit = None elif isinstance(self.max_models_on_disc, float): max_models_on_disk = None @@ -428,7 +438,7 @@ def main( max_models_on_disk = None memory_limit = None - candidates, discarded = self.candidates( + candidates, all_discarded = self.candidates( runs=runs, better_than_dummy=True, nbest=self.ensemble_nbest, @@ -454,53 +464,45 @@ def main( # Find the intersect between the most groups and use that to fit the ensemble intersect = intersection(candidates_set, valid_set, test_set) if len(intersect) > 0: - candidate_models = sorted(list(intersect)) - valid_models = candidate_models - test_models = candidate_models + candidates = list(intersect) + candidates = sorted(candidates, key=lambda r: r.id) + + valid_models = candidates + test_models = candidates elif len(candidates_set & valid_set) > 0: - candidate_models = sorted(list(candidates_set & valid_set)) - valid_models = candidate_models + intersect = candidates_set & valid_set + candidates, discarded = split(candidates, by=lambda r: r in intersect) + candidates = sorted(candidates, key=lambda r: r.id) + + valid_models = candidates test_models = [] elif len(candidates_set & test_set) > 0: - candidate_models = sorted(list(candidates_set & test_set)) + intersect = candidates_set & test_set + candidates, discarded = split(candidates, by=lambda r: r in intersect) + candidates = sorted(candidates, key=lambda r: r.id) + valid_models = [] - test_models = candidate_models + test_models = candidates - # This has to be the case else: - candidate_models = sorted(list(candidates_set)) - test_models = [] + candidates = sorted(candidates, key=lambda r: r.id) + discarded = [] + valid_models = [] + test_models = [] + + all_discarded.update(discarded) # To save on pickle and to allow for fresh predictions, unload the cache # before pickling - for run in candidate_models: + for run in candidates: run.unload_cache() # Save the candidates for the next round - with self.runs_path.open("wb") as f: - pickle.dump({run.id: run for run in candidate_models}, f) - - # If there was any change from the previous run, either in terms of - # runs or one of those runs had its loss updated, then we need to - # fit the ensemble builder - previous_candidate_ids = set(previous_candidates.keys()) - current_candidate_ids = set(run.id for run in candidate_models) - if len(previous_candidate_ids ^ current_candidate_ids) > 0 or any( - run in candidate_models for run in requires_update - ): - ensemble = self.fit_ensemble( 
- runs=candidate_models - ) - if ensemble is not None: - self.logger.info(ensemble) - ens_perf = ensemble.get_validation_performance() - self.validation_performance_ = min( - self.validation_performance_, ens_perf - ) - self.backend.save_ensemble(ensemble, iteration, self.seed) + with self.previous_candidates_path.open("wb") as f: + pickle.dump({run.id: run for run in candidates}, f) # Delete files for models which were not considered candidates if len(discarded) > 0: @@ -512,20 +514,50 @@ def main( except Exception as e: self.logger.error(f"Failed to delete files for {run}: \n{e}") + # If there was any change from the previous run, either in terms of + # runs or one of those runs had its loss updated, then we need to + # fit the ensemble builder + previous_candidate_ids = set(previous_candidates.keys()) + current_candidate_ids = set(run.id for run in candidates) + different_candidates = previous_candidate_ids ^ current_candidate_ids + + updated_candidates = iter(run in candidates for run in requires_update) + + if not any(different_candidates) or any(updated_candidates): + self.logger.info("All ensemble candidates the same, no update required") + return self.ensemble_history, self.ensemble_nbest, None, None, None + + targets = cast(np.ndarray, self.targets("ensemble")) # Sure they exist + ensemble = self.fit_ensemble( + candidates, + targets=targets, + size=self.ensemble_size, + task=self.task_type, + metric=self.metric, + precision=self.precision, + random_state=self.random_state, + ) + + if ensemble is not None: + self.logger.info(str(ensemble)) + ens_perf = ensemble.get_validation_performance() + self.validation_performance_ = min(self.validation_performance_, ens_perf) + self.backend.save_ensemble(ensemble, iteration, self.seed) # type: ignore + # Continue with evaluating the ensemble after making some space if ensemble is not None: performance_stamp = {"Timestamp": pd.Timestamp.now()} for kind, score_name, models in [ - ("ensemble", "optimization", candidate_models), + ("ensemble", "optimization", candidates), ("valid", "val", valid_models), ("test", "test", test_models), ]: if len(models) == 0: continue - targets = self.targets(kind) - if targets is None: + pred_targets = self.targets(kind) + if pred_targets is None: self.logger.warning(f"No ensemble targets for {kind}") continue @@ -545,13 +577,14 @@ def main( ).transpose() score = calculate_score( - solution=targets, + solution=pred_targets, prediction=pred, task_type=self.task_type, metric=self.metric, scoring_functions=None, ) performance_stamp[f"ensemble_{score_name}_score"] = score + self.ensemble_history.append(performance_stamp) if return_predictions: return ( @@ -564,47 +597,6 @@ def main( else: return self.ensemble_history, self.ensemble_nbest, None, None, None - def step_memory_split( - self, - runs: Sequence[Run], - limit: float, - sort: bool = True, - ) -> tuple[list[Run], list[Run]]: - """Split runs into - - Parameters - ---------- - runs : Sequence[Run] - The runs to consider - - limit : float - The memory limit in MB - - Returns - ------- - (keep: list[Run], discarded: list[Run]) - """ - largest = max(runs, key=lambda r: r.mem_usage) - cutoff = limit - largest.mem_usage - - # Sort by loss and num run - if sort: - runs = sorted(runs, lambda r: (r.loss, r.num_run)) - - runs_with_acc_mem = zip(runs, accumulate(run.mem_usage for run in runs)) - candidates, discarded = cut(runs_with_acc_mem, at=lambda r: r[1] >= cutoff) - - self.logger.warning( - f"Limiting num of models via `memory_limit` float" - f" 
memory_limit={limit}" - f" cutoff={cutoff}" - f" largest={largest.mem_usage}" - f" remaining={len(candidates)}" - f" discarded={len(discarded)}" - ) - - return candidates, discarded - def requires_loss_update(self, runs: Sequence[Run], limit: int | None) -> list[Run]: """ @@ -642,7 +634,7 @@ def candidates( max_models_on_disk: int | None = None, memory_limit: float | None = None, performance_range_threshold: float | None = None, - ) -> tuple[list[run], list[run]]: + ) -> tuple[list[Run], set[Run]]: """Get a list of candidates from `runs` Applies a set of reductions in order of parameters to reach a set of final @@ -679,28 +671,25 @@ def candidates( Returns ------- - (candidates: list[Run], discarded: list[Run]) + (candidates: list[Run], discarded: set[Run]) A tuple of runs that are candidates and also those that didn't make it """ - all_discarded: set[Run] = {} + all_discarded: set[Run] = set() # We filter out all runs that don't have any predictions for the ensemble has_predictions = lambda run: run.pred_path("ensemble").exists() - candidates, discarded = split_by(runs, by=has_predictions) + candidates, discarded = split(runs, by=has_predictions) all_discarded.update(discarded) if len(candidates) == 0: self.logger.debug("No runs with predictions on ensemble data set") - return candidates, discarded + return candidates, all_discarded if len(discarded) > 0: - self.logger.warn(f"Have no ensemble predictions for {discarded}") + self.logger.warning(f"Have no ensemble predictions for {discarded}") # Get all the ones that have a tangible loss - candidates, discarded = split_by( - candidates, - lambda r: r.loss is not None and r.loss < np.inf, - ) + candidates, discarded = split(candidates, by=lambda r: r.loss < np.inf) all_discarded.update(discarded) if len(candidates) == 0: @@ -708,7 +697,7 @@ def candidates( return candidates, all_discarded # Further split the candidates into those that are real and dummies - dummies, real = split_by(candidates, by=lambda r: r.is_dummy()) + dummies, real = split(candidates, by=lambda r: r.is_dummy()) dummies = sorted(dummies, key=lambda r: r.loss) dummy_cutoff = dummies[0].loss @@ -717,14 +706,14 @@ def candidates( raise RuntimeError("Expected at least one dummy run") if len(real) == 0: - self.logger.warnings("No real runs, using dummies as candidates") + self.logger.warning("No real runs, using dummies as candidates") candidates = dummies return candidates, all_discarded if better_than_dummy: self.logger.debug(f"Using {dummy_cutoff} to filter candidates") - candidates, discarded = split_by(real, by=lambda r: r.loss < dummy_cutoff) + candidates, discarded = split(real, by=lambda r: r.loss < dummy_cutoff) all_discarded.update(discarded) # If there are no real candidates left, use the dummies @@ -746,14 +735,16 @@ def candidates( else: nkeep = nbest - if nkeep is None and max_models_on_disk is not None: - nkeep = max_models_on_disk - elif nkeep is not None and max_models_on_disk < nkeep: - self.logger.warning( - f"Limiting {n_candidates} by `max_models_on_disk={max_models_on_disk}`" - f"instead of {nkeep} (set from `nbest={nbest}`)" - ) - nkeep = max_models_on_disk + if max_models_on_disk is not None: + if nkeep is None: + nkeep = max_models_on_disk + elif max_models_on_disk < nkeep: + self.logger.warning( + f"Limiting {n_candidates} by" + f"`max_models_on_disk={max_models_on_disk}`" + f"instead of {nkeep} (set from `nbest={nbest}`)" + ) + nkeep = max_models_on_disk else: nkeep = nkeep @@ -762,30 +753,39 @@ def candidates( # If we need to specify how many to 
keep, keep that many if nkeep is not None: - candidates, discarded = cut(candidates, at=nkeep) + candidates, discarded = cut(candidates, nkeep) all_discarded.update(discarded) self.logger.info(f"Discarding {len(discarded)}/{n_candidates} runs") # Choose which ones to discard if there's a memory limit if memory_limit is not None: - candidates, discarded = self.memory_split( - runs=candidates, - limit=memory_limit, - sort=False, # Already sorted + largest = max(candidates, key=lambda r: r.mem_usage) + cutoff = memory_limit - largest.mem_usage + + accumulated_mem_usage = accumulate(r.mem_usage for r in candidates) + cutpoint = findwhere(accumulated_mem_usage, lambda mem: mem >= cutoff) + + candidates, discarded = cut(candidates, cutpoint) + + self.logger.warning( + "Limiting num of models via `memory_limit` float" + f" memory_limit={memory_limit}" + f" cutoff={cutoff}" + f" largest={largest.mem_usage}" + f" remaining={len(candidates)}" + f" discarded={len(discarded)}" ) all_discarded.update(discarded) if performance_range_threshold is not None: - high = dummies[0].loss - low = candidates[0].loss - candidates, discarded = value_split( - candidates, - high=high, - low=low, - at=performance_range_threshold, - key=lambda run: run.loss, - sort=False, # Already sorted - ) + x = performance_range_threshold + worst = dummies[0].loss + best = candidates[0].loss + + cutoff = x * best + (1 - x) * worst + + candidates, discarded = cut(candidates, where=lambda r: r.loss >= cutoff) + all_discarded.update(discarded) return candidates, all_discarded @@ -849,11 +849,7 @@ def fit_ensemble( self.logger.debug(f"Fitting the ensemble took {duration} seconds.") return ensemble - def loss( - self, - run: Run, - kind: str = "ensemble" - ) -> float: + def loss(self, run: Run, kind: str = "ensemble") -> float: """Calculate the loss for a list of runs Parameters diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index d44e816f6a..86acd6b1d7 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -35,7 +35,7 @@ def __init__(self, path: Path) -> None: self.num_run = int(num_run) self.budget = float(budget) - self.loss: float | None = None + self.loss: float = np.inf self._mem_usage: float | None = None # Items that will be delete when the run is saved back to file diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index 467cf1e463..87d3e5fce7 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -88,7 +88,7 @@ def intersection(*items: Iterable[T]) -> set[T]: def cut( lst: Iterable[T], - at: int | Callable[[T], bool], + where: int | Callable[[T], bool], ) -> tuple[list[T], list[T]]: """Cut a list in two at a given index or predicate @@ -105,25 +105,23 @@ def cut( tuple[list[T], list[T]] The split items """ - if isinstance(at, int): + if isinstance(where, int): lst = list(lst) - return lst[:at], lst[at:] + return lst[:where], lst[where:] else: a = [] itr = iter(lst) for x in itr: - if not at(x): + if not where(x): a.append(x) break return a, [x] + list(itr) -def split_by( +def split( lst: Iterable[T], by: Callable[[T], bool], - *, - split_at_first: bool = False, ) -> tuple[list[T], list[T]]: """Split a list in two based on a predicate. 
@@ -139,36 +137,20 @@ def split_by( by : Callable[[T], bool] The predicate to split it on - split_at_first: bool = False - Whether to split at the first occurence of `func == True` - Returns ------- (a: list[T], b: list[T]) - a is where the func is True and b is where the func was False. If using - `split_at_first = True`, b contains everything after the first - False occurence. + a is where the func is True and b is where the func was False. """ a = [] b = [] - if split_at_first: - itr = iter(lst) - for x in itr: - if by(x): - a.append(x) - else: - break + for x in lst: + if by(x): + a.append(x) + else: + b.append(x) - return a, list(itr) # Convert remaining to list - - else: - for x in lst: - if by(x): - a.append(x) - else: - b.append(x) - - return a, b + return a, b def bound(val: float, bounds: tuple[float, float]) -> float: @@ -215,87 +197,3 @@ def findwhere(itr: Iterable[T], func: Callable[[T], bool], *, default: int = -1) The first index where func was True """ return next((i for i, t in enumerate(itr) if func(t)), default) - - -@no_type_check -def value_split( - lst: Sequence[T], - *, - key: Callable[[T], float] | None = None, - low: float | None = None, - high: float | None = None, - at: float = 0.5, - sort: bool = True, -) -> tuple[list[T], list[T]]: - """Split a list according to it's values at a certain percentage. - - Will attempt to sort the values unless specified that it should not `sort`. - The endpoints `low` and `high` are assumed to be the min and max of the sorted - `lst`. - - The value used for splitting is calculated by - - (1 - `at`) * low + `at` * high - - ..code:: python - - # min low at=0.75 high/max - # |-----|----------------|---------| - # 0 20 80 100 - # - # [----------------------][++++++++] - # split 1 split 2 - - x = np.linspace(0, 100, 21) - # [0, 5, 10, ..., 95, 100] - - lower, higher = value_split(x, at=0.6, low=20) - - print(lower, higher) - # [0, 5, 10, ..., 75] [80, ..., 100] - - Parameters - ---------- - lst : Sequence[T] - The list of items to split - - key : Callable[[T], float] | None = None - An optional key to access the values by - - low : float | None = None - The lowest value to consider, otherwise will use the minimum in lst - - high : float | None = None - The highest value to consider, otherwise will use the maximum in lst - - at : float = 0.5 - At what perecentage to split at - - sort : bool = True - Whether to sort the values, set to False if values are sorted before hand - - Returns - ------- - tuple[list[T], list[T]] - The lower and upper parts of the list based on the split - """ - if sort: - lst = sorted(lst) if key is None else sorted(lst, key=key) - - if low is None: - low = lst[0] if key is None else key(lst[0]) - - if high is None: - high = lst[-1] if key is None else key(lst[-1]) - - # Convex combination of two points - pivot_value = (1 - at) * low + (at) * high - - if key is None: - greater_than_pivot = lambda x: x >= pivot_value - else: - greater_than_pivot = lambda x: key(x) >= pivot_value - - pivot_idx = findwhere(lst, greater_than_pivot, default=len(lst)) - - return lst[:pivot_idx], lst[pivot_idx:] diff --git a/pyproject.toml b/pyproject.toml index 44cb62cd3b..3154faedfe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,8 +95,6 @@ module = [ "autosklearn.automl", "autosklearn.smbo", "autosklearn.experimental.askl2", - "autosklearn.ensemble_building.builder", - "autosklearn.ensemble_building.manager", "autosklearn.ensembles.singlebest_ensemble", "autosklearn.ensembles.ensemble_selection", "autosklearn.evaluation", 
#__init__ From fa6146b9c875a0de04afaa3523c8944210f60f5e Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 8 Apr 2022 15:45:00 +0200 Subject: [PATCH 060/117] Mypy fixes --- autosklearn/ensemble_building/builder.py | 54 ++++++------------------ autosklearn/ensemble_building/manager.py | 26 +++++------- autosklearn/util/functional.py | 2 +- 3 files changed, 24 insertions(+), 58 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index c339610107..9fbdfcbf56 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -23,13 +23,7 @@ from autosklearn.ensemble_building.run import Run, RunID from autosklearn.ensembles.ensemble_selection import EnsembleSelection from autosklearn.metrics import Scorer, calculate_loss, calculate_score -from autosklearn.util.functional import ( - bound, - cut, - findwhere, - intersection, - split, -) +from autosklearn.util.functional import bound, cut, findwhere, intersection, split from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules @@ -160,7 +154,7 @@ def __init__( self.start_time: float = 0.0 # Track the ensemble performance - self.ensemble_history: list = [] + self.ensemble_history: list[dict[str, Any]] = [] # Keep running knowledge of its validation performance self.validation_performance_ = np.inf @@ -236,13 +230,7 @@ def run( end_at: float | None = None, time_buffer: int = 5, return_predictions: bool = False, - ) -> tuple[ - list[dict[str, Any]], - int | float, - np.ndarray | None, - np.ndarray | None, - np.ndarray | None, - ]: + ) -> tuple[list[dict[str, Any]], int | float]: """Run the ensemble building process Parameters @@ -354,24 +342,18 @@ def run( "Memory Exception -- restart with " "less ensemble_nbest: %d" % self.ensemble_nbest ) - return [], self.ensemble_nbest, None, None, None + return [], self.ensemble_nbest else: return safe_ensemble_script.result - return [], self.ensemble_nbest, None, None, None + return [], self.ensemble_nbest def main( self, time_left: float, iteration: int, return_predictions: bool = False, - ) -> tuple[ - list[dict[str, Any]], - int | float, - np.ndarray | None, - np.ndarray | None, - np.ndarray | None, - ]: + ) -> tuple[list[dict[str, Any]], int | float]: """Run the main loop of ensemble building Parameters @@ -387,7 +369,7 @@ def main( Returns ------- - (ensemble_history, nbest, train_preds, valid_preds, test_preds) + (ensemble_history: list[dict[str, Any]], nbest: int | float) """ # Pynisher jobs inside dask 'forget' the logger configuration. 
# So we have to set it up accordingly @@ -397,7 +379,6 @@ def main( ) self.start_time = time.time() - train_pred, valid_pred, test_pred = None, None, None used_time = time.time() - self.start_time left_for_iter = time_left - used_time @@ -406,7 +387,7 @@ def main( # Can't load data, exit early if not os.path.exists(self.backend._get_targets_ensemble_filename()): self.logger.debug(f"No targets for ensemble: {traceback.format_exc()}") - return self.ensemble_history, self.ensemble_nbest, None, None, None + return self.ensemble_history, self.ensemble_nbest # Load in information from previous candidates and also runs available_runs = self.available_runs() @@ -420,7 +401,7 @@ def main( if len(runs) == 0: self.logger.debug("Found no runs") - return self.ensemble_history, self.ensemble_nbest, None, None, None + return self.ensemble_history, self.ensemble_nbest # Calculate the loss for those that require it requires_update = self.requires_loss_update(runs, limit=self.read_at_most) @@ -449,7 +430,7 @@ def main( if len(candidates) == 0: self.logger.debug("No viable candidates found for ensemble building") - return self.ensemble_history, self.ensemble_nbest, None, None, None + return self.ensemble_history, self.ensemble_nbest # Get a set representation of them as we will begin doing intersections # Not here that valid_set and test_set are both subsets of candidates_set @@ -459,7 +440,7 @@ def main( if len(valid_set & test_set) == 0 and len(test_set) > 0 and len(valid_set) > 0: self.logger.error("valid_set and test_set not empty but do not overlap") - return self.ensemble_history, self.ensemble_nbest, None, None, None + return self.ensemble_history, self.ensemble_nbest # Find the intersect between the most groups and use that to fit the ensemble intersect = intersection(candidates_set, valid_set, test_set) @@ -525,7 +506,7 @@ def main( if not any(different_candidates) or any(updated_candidates): self.logger.info("All ensemble candidates the same, no update required") - return self.ensemble_history, self.ensemble_nbest, None, None, None + return self.ensemble_history, self.ensemble_nbest targets = cast(np.ndarray, self.targets("ensemble")) # Sure they exist ensemble = self.fit_ensemble( @@ -586,16 +567,7 @@ def main( performance_stamp[f"ensemble_{score_name}_score"] = score self.ensemble_history.append(performance_stamp) - if return_predictions: - return ( - self.ensemble_history, - self.ensemble_nbest, - train_pred, - valid_pred, - test_pred, - ) - else: - return self.ensemble_history, self.ensemble_nbest, None, None, None + return self.ensemble_history, self.ensemble_nbest def requires_loss_update(self, runs: Sequence[Run], limit: int | None) -> list[Run]: """ diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index 8e9a3c2aba..bef0552e1f 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Optional, Tuple, Union +from typing import Any, Optional, Union import logging.handlers import time @@ -130,10 +130,10 @@ def __init__( self.pynisher_context = pynisher_context # Store something similar to SMAC's runhistory - self.history = [] + self.history: list[dict[str, Any]] = [] # We only submit new ensembles when there is not an active ensemble job - self.futures = [] + self.futures: list[dask.distributed.Future] = [] # The last criteria is the number of iterations self.iteration = 0 @@ -147,7 +147,7 @@ def __call__( run_info: RunInfo, 
result: RunValue, time_left: float, - ): + ) -> None: """ Returns ------- @@ -157,7 +157,8 @@ def __call__( """ if result.status in (StatusType.STOP, StatusType.ABORT) or smbo._stop: return - self.build_ensemble(smbo.tae_runner.client) + client = getattr(smbo.tae_runner, "client") + self.build_ensemble(client) def build_ensemble( self, @@ -267,7 +268,7 @@ def build_ensemble( def fit_and_return_ensemble( backend: Backend, dataset_name: str, - task_type: str, + task_type: int, metric: Scorer, ensemble_size: int, ensemble_nbest: int, @@ -282,13 +283,7 @@ def fit_and_return_ensemble( logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, memory_limit: Optional[int] = None, random_state: Optional[Union[int, np.random.RandomState]] = None, -) -> Tuple[ - list[dict[str, Any]], - int, - np.ndarray | None, - np.ndarray | None, - np.ndarray | None, -]: +) -> tuple[list[dict[str, Any]], int | float]: """ A short function to fit and create an ensemble. It is just a wrapper to easily send @@ -362,9 +357,8 @@ def fit_and_return_ensemble( Returns ------- - List[Tuple[int, float, float, float]] - A list with the performance history of this ensemble, of the form - [(pandas_timestamp, train_performance, val_performance, test_performance)] + (ensemble_history: list[dict[str, Any]], nbest: int | float) + The ensemble history and the nbest chosen members """ result = EnsembleBuilder( backend=backend, diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index 87d3e5fce7..8609539871 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Callable, Iterable, Sequence, TypeVar, no_type_check +from typing import Callable, Iterable, TypeVar from functools import reduce From 41711c2f14aed819e1befcd1dcf5aba4d695977b Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 8 Apr 2022 18:17:05 +0200 Subject: [PATCH 061/117] Tests for `Runs` --- autosklearn/ensemble_building/builder.py | 2 +- autosklearn/ensemble_building/run.py | 23 +-- test/test_ensemble_builder/test_run.py | 202 +++++++++++++++++++++++ 3 files changed, 215 insertions(+), 12 deletions(-) create mode 100644 test/test_ensemble_builder/test_run.py diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 9fbdfcbf56..fe197dccec 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -587,7 +587,7 @@ def requires_loss_update(self, runs: Sequence[Run], limit: int | None) -> list[R if run.loss is None or run.loss == np.inf: queue.append(run) - elif run.loss is not None and run.pred_modified("ensemble"): + elif run.loss is not None and run.was_modified(): self.logger.debug(f"{run.id} had its predictions modified?") run.record_modified_times() # re-mark modfied times queue.append(run) diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index 86acd6b1d7..150e9df1dd 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -57,17 +57,14 @@ def is_dummy(self) -> bool: """Whether this run is a dummy run or not""" return self.num_run == 1 - def pred_modified(self, kind: str) -> bool: + def was_modified(self) -> bool: """Query for when the ens file was last modified""" - if kind not in self.recorded_mtimes: - raise ValueError(f"Run has no recorded time for {kind}: {self}") + recorded = self.recorded_mtimes.get("ensemble") + last = self.pred_path().stat().st_mtime + 
print(recorded, last) + return recorded != last - recorded = self.recorded_mtimes[kind] - last = self.pred_path(kind).stat().st_mtime - - return recorded == last - - def pred_path(self, kind: str) -> Path: + def pred_path(self, kind: str = "ensemble") -> Path: """Get the path to certain predictions""" fname = f"predictions_{kind}_{self.seed}_{self.num_run}_{self.budget}.npy" return self.dir / fname @@ -82,7 +79,7 @@ def record_modified_times(self) -> None: def predictions( self, - kind: str, + kind: str = "ensemble", precision: int | None = None, ) -> np.ndarray: """Load the predictions for this run @@ -122,7 +119,11 @@ def predictions( return predictions def unload_cache(self) -> None: - """Removes the cache from this object""" + """Removes the cache from this object + + We could also enforce that nothing gets pickled to disk with __getstate__ + but this is simpler and shows expliciyt behaviour in caller code. + """ self._cache = {} @property diff --git a/test/test_ensemble_builder/test_run.py b/test/test_ensemble_builder/test_run.py new file mode 100644 index 0000000000..108b97c17d --- /dev/null +++ b/test/test_ensemble_builder/test_run.py @@ -0,0 +1,202 @@ +from __future__ import annotations + +from typing import Callable + +import math +import pickle +import time +from pathlib import Path + +import numpy as np + +from autosklearn.ensemble_building.run import Run + +from pytest_cases import fixture, parametrize + +from test.conftest import DEFAULT_SEED + + +@fixture +def make_run(tmp_path: Path) -> Callable[..., Run]: + def _make( + id: int | None = 2, + seed: int = DEFAULT_SEED, + budget: float = 0.0, + loss: float | None = None, + model_size: int | None = None, + predictions: list[str] | dict[str, np.ndarray] | None = None, + ) -> Run: + model_id = f"{seed}_{id}_{budget}" + dir = tmp_path / model_id + + if not dir.exists(): + dir.mkdir() + + # Populate if None + if predictions is None: + predictions = ["ensemble", "valid", "test"] + + # Convert to dict + if isinstance(predictions, list): + dummy = np.asarray([[0]]) + predictions = {kind: dummy for kind in predictions} + + # Write them + if isinstance(predictions, dict): + for kind, val in predictions.items(): + fname = f"predictions_{kind}_{seed}_{id}_{budget}.npy" + with (dir / fname).open("wb") as f: + np.save(f, val) + + run = Run(dir) + + if loss is not None: + run.loss = loss + + # MB + if model_size is not None: + n_bytes = int(model_size * math.pow(1024, 2)) + model_path = dir / f"{seed}.{id}.{budget}.model" + with model_path.open("wb") as f: + f.write(bytearray(n_bytes)) + + return run + + return _make + + +def test_is_dummy(make_run: Callable[..., Run]) -> None: + """ + Expects + ------- + * We expect runs with an num_run (id) of 1 to be a dummy + """ + run = make_run(id=1) + assert run.is_dummy() + + run = make_run(id=2) + assert not run.is_dummy() + + +def test_was_modified(make_run: Callable[..., Run]) -> None: + """ + Expects + ------- + * Should properly indicate when a file was modified + """ + run = make_run() + assert not run.was_modified() + + time.sleep(0.2) # Just to give some time after creation + path = run.pred_path("ensemble") + path.touch() + + assert run.was_modified() + + +def test_record_modified_times_with_was_modified(make_run: Callable[..., Run]) -> None: + """ + Expects + ------- + * Updating the recorded times should not trigger `was_modified` + * Should update the recorded times so `was_modified` will give False after being updated + """ + run = make_run() + path = run.pred_path("ensemble") + + 
time.sleep(0.2) + run.record_modified_times() + assert not run.was_modified() + + time.sleep(0.2) + path.touch() + assert run.was_modified() + + time.sleep(0.2) + run.record_modified_times() + assert not run.was_modified() + + +def test_predictions_pickled(make_run: Callable[..., Run]) -> None: + """ + Expects + ------- + * Should be able to load pickled predictions + """ + run = make_run(predictions=[]) + x = np.array([0]) + + path = run.pred_path("ensemble") + with path.open("wb") as f: + pickle.dump(x, f) + + assert run.predictions("ensemble") is not None + + +@parametrize( + "precision, expected", [(16, np.float16), (32, np.float32), (64, np.float64)] +) +def test_predictions_precision( + make_run: Callable[..., Run], + precision: int, + expected: type +) -> None: + """ + Expects + ------- + * Loading predictions with a given precision should load the expected type + """ + run = make_run() + assert run.predictions(precision=precision).dtype == expected + + +def test_caching(make_run: Callable[..., Run]) -> None: + """ + Expects + ------- + * Attempting to load the same predictions again will cause the result to be cached + * Unloading the cache will cause it to reload and reread the predictions + """ + run = make_run() + + path = run.pred_path() + before_access = path.stat().st_atime_ns + + time.sleep(0.01) + _ = run.predictions() # Should cache result + load_access = path.stat().st_atime_ns + + # We test that it was not loaded from disk by checking when it was last accessed + assert before_access != load_access + + time.sleep(0.01) + _ = run.predictions() # Should use cache result + cache_access = path.stat().st_atime_ns + + assert cache_access == load_access + + run.unload_cache() + + time.sleep(0.01) + _ = run.predictions() # Should have reloaded it + reloaded_access = path.stat().st_atime_ns + + assert reloaded_access != cache_access + + +def test_equality(make_run: Callable[..., Run]) -> None: + """ + Expects + ------- + * Two runs with the same id's should be considered equal + * Otherwise, they should be considered different + """ + r1 = make_run(id=1, budget=49.3, seed=3) + r2 = make_run(id=1, budget=49.3, seed=3) + + assert r1 == r2 + + r3 = make_run(id=1, budget=0.0, seed=3) + + assert r1 != r3 + assert r2 != r3 From 84b96189ca1db1ab06408bca791bae5805b858d2 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 8 Apr 2022 18:22:34 +0200 Subject: [PATCH 062/117] Move `make_run` to fixtures --- autosklearn/ensemble_building/__init__.py | 3 +- test/fixtures/ensemble_building.py | 63 ++++++++++++++++++++++ test/test_ensemble_builder/test_run.py | 64 ++--------------------- 3 files changed, 70 insertions(+), 60 deletions(-) create mode 100644 test/fixtures/ensemble_building.py diff --git a/autosklearn/ensemble_building/__init__.py b/autosklearn/ensemble_building/__init__.py index 95ba64e83e..4c63165e1b 100644 --- a/autosklearn/ensemble_building/__init__.py +++ b/autosklearn/ensemble_building/__init__.py @@ -1,4 +1,5 @@ from autosklearn.ensemble_building.builder import EnsembleBuilder from autosklearn.ensemble_building.manager import EnsembleBuilderManager +from autosklearn.ensemble_building.run import Run -__all__ = ["EnsembleBuilder", "EnsembleBuilderManager"] +__all__ = ["EnsembleBuilder", "EnsembleBuilderManager", "Run"] diff --git a/test/fixtures/ensemble_building.py b/test/fixtures/ensemble_building.py new file mode 100644 index 0000000000..af6bf2d865 --- /dev/null +++ b/test/fixtures/ensemble_building.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +from typing 
import Callable + +import math +from pathlib import Path + +import numpy as np + +from autosklearn.ensemble_building import Run + +from pytest_cases import fixture + +from test.conftest import DEFAULT_SEED + + +@fixture +def make_run(tmp_path: Path) -> Callable[..., Run]: + def _make( + id: int | None = 2, + seed: int = DEFAULT_SEED, + budget: float = 0.0, + loss: float | None = None, + model_size: int | None = None, + predictions: list[str] | dict[str, np.ndarray] | None = None, + ) -> Run: + model_id = f"{seed}_{id}_{budget}" + dir = tmp_path / model_id + + if not dir.exists(): + dir.mkdir() + + # Populate if None + if predictions is None: + predictions = ["ensemble", "valid", "test"] + + # Convert to dict + if isinstance(predictions, list): + dummy = np.asarray([[0]]) + predictions = {kind: dummy for kind in predictions} + + # Write them + if isinstance(predictions, dict): + for kind, val in predictions.items(): + fname = f"predictions_{kind}_{seed}_{id}_{budget}.npy" + with (dir / fname).open("wb") as f: + np.save(f, val) + + run = Run(dir) + + if loss is not None: + run.loss = loss + + # MB + if model_size is not None: + n_bytes = int(model_size * math.pow(1024, 2)) + model_path = dir / f"{seed}.{id}.{budget}.model" + with model_path.open("wb") as f: + f.write(bytearray(n_bytes)) + + return run + + return _make diff --git a/test/test_ensemble_builder/test_run.py b/test/test_ensemble_builder/test_run.py index 108b97c17d..34c37dc0c6 100644 --- a/test/test_ensemble_builder/test_run.py +++ b/test/test_ensemble_builder/test_run.py @@ -2,67 +2,14 @@ from typing import Callable -import math import pickle import time -from pathlib import Path import numpy as np -from autosklearn.ensemble_building.run import Run +from autosklearn.ensemble_building import Run -from pytest_cases import fixture, parametrize - -from test.conftest import DEFAULT_SEED - - -@fixture -def make_run(tmp_path: Path) -> Callable[..., Run]: - def _make( - id: int | None = 2, - seed: int = DEFAULT_SEED, - budget: float = 0.0, - loss: float | None = None, - model_size: int | None = None, - predictions: list[str] | dict[str, np.ndarray] | None = None, - ) -> Run: - model_id = f"{seed}_{id}_{budget}" - dir = tmp_path / model_id - - if not dir.exists(): - dir.mkdir() - - # Populate if None - if predictions is None: - predictions = ["ensemble", "valid", "test"] - - # Convert to dict - if isinstance(predictions, list): - dummy = np.asarray([[0]]) - predictions = {kind: dummy for kind in predictions} - - # Write them - if isinstance(predictions, dict): - for kind, val in predictions.items(): - fname = f"predictions_{kind}_{seed}_{id}_{budget}.npy" - with (dir / fname).open("wb") as f: - np.save(f, val) - - run = Run(dir) - - if loss is not None: - run.loss = loss - - # MB - if model_size is not None: - n_bytes = int(model_size * math.pow(1024, 2)) - model_path = dir / f"{seed}.{id}.{budget}.model" - with model_path.open("wb") as f: - f.write(bytearray(n_bytes)) - - return run - - return _make +from pytest_cases import parametrize def test_is_dummy(make_run: Callable[..., Run]) -> None: @@ -99,7 +46,8 @@ def test_record_modified_times_with_was_modified(make_run: Callable[..., Run]) - Expects ------- * Updating the recorded times should not trigger `was_modified` - * Should update the recorded times so `was_modified` will give False after being updated + * Should update the recorded times so `was_modified` will give False after being + updated """ run = make_run() path = run.pred_path("ensemble") @@ -137,9 +85,7 @@ def 
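# Editorial aside -- illustration only, not part of the patch. `make_run` is an
# instance of pytest's "factory as fixture" idiom: the fixture returns a builder
# function so each test can construct as many customised objects as it needs
# (the patch uses `pytest_cases.fixture`, but the idiom is the same). Names
# below are invented for the sketch.
import pytest

@pytest.fixture
def make_item(tmp_path):
    def _make(name: str = "item"):
        path = tmp_path / name
        path.mkdir()
        return path
    return _make

def test_factory_fixture(make_item):
    assert make_item("a") != make_item("b")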
test_predictions_pickled(make_run: Callable[..., Run]) -> None: "precision, expected", [(16, np.float16), (32, np.float32), (64, np.float64)] ) def test_predictions_precision( - make_run: Callable[..., Run], - precision: int, - expected: type + make_run: Callable[..., Run], precision: int, expected: type ) -> None: """ Expects From 4dafa0d903d386ba869abd2b117eb34446423e77 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sat, 9 Apr 2022 19:59:10 +0200 Subject: [PATCH 063/117] Fix run deletion --- autosklearn/ensemble_building/builder.py | 149 ++++++++++++++--------- 1 file changed, 91 insertions(+), 58 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index fe197dccec..87e3837302 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -147,9 +147,17 @@ def __init__( self.dataset_name = dataset_name self.ensemble_size = ensemble_size self.ensemble_nbest = ensemble_nbest - self.max_models_on_disc = max_models_on_disc self.performance_range_threshold = performance_range_threshold + # Decide if self.max_models_on_disk is a memory limit or model limit + self.max_models_on_disc: int | None = None + self.model_memory_limit: float | None = None + + if isinstance(max_models_on_disc, int): + self.max_models_on_disk = self.max_models_on_disc + elif isinstance(self.max_models_on_disc, float): + self.model_memory_limit = self.max_models_on_disc + # The starting time of the procedure self.start_time: float = 0.0 @@ -356,6 +364,14 @@ def main( ) -> tuple[list[dict[str, Any]], int | float]: """Run the main loop of ensemble building + The process is: + * Load all available runs + previous candidates (if any) + * Update the loss of those that require + * From these runs, get a list of candidates + * Save candidates + * Delete models that are not candidates + * Build an ensemble from the candidates if there are new candidates + Parameters ---------- time_left : float @@ -408,23 +424,16 @@ def main( for run in requires_update: run.loss = self.loss(run, kind="ensemble") - # Decide if self.max_models_on_disk is an - if isinstance(self.max_models_on_disc, int): - max_models_on_disk = self.max_models_on_disc - memory_limit = None - elif isinstance(self.max_models_on_disc, float): - max_models_on_disk = None - memory_limit = self.max_models_on_disc - else: - max_models_on_disk = None - memory_limit = None + runs_keep, runs_delete = self.requires_deletion( + runs, + max_models=self.max_models_on_disk, + memory_limit=self.model_memory_limit, + ) candidates, all_discarded = self.candidates( - runs=runs, + runs_keep, better_than_dummy=True, nbest=self.ensemble_nbest, - max_models_on_disk=max_models_on_disk, - memory_limit=memory_limit, performance_range_threshold=self.performance_range_threshold, ) @@ -486,8 +495,8 @@ def main( pickle.dump({run.id: run for run in candidates}, f) # Delete files for models which were not considered candidates - if len(discarded) > 0: - for run in discarded: + if any(runs_delete): + for run in runs_delete: if not run.is_dummy(): try: shutil.rmtree(run.dir) @@ -603,8 +612,6 @@ def candidates( *, better_than_dummy: bool = False, nbest: int | float | None = None, - max_models_on_disk: int | None = None, - memory_limit: float | None = None, performance_range_threshold: float | None = None, ) -> tuple[list[Run], set[Run]]: """Get a list of candidates from `runs` @@ -628,12 +635,7 @@ def candidates( The nbest models to select. If `int`, acts as an absolute limit. 
If `float`, acts as a percentage of available candidates. - max_models_on_disk : int | None - The maximum amount of models allowed on disk. If the number of candidates - exceed this limit after previous filters applied, this will further - reduce the candidates. - - memory_limit : float | None + model_memory_limit : float | None A maximum memory limit in MB for the runs to occupy. If the candidates at this point exceed this limit, the best n candidates that fit into this limit will be chosen. @@ -707,48 +709,14 @@ def candidates( else: nkeep = nbest - if max_models_on_disk is not None: - if nkeep is None: - nkeep = max_models_on_disk - elif max_models_on_disk < nkeep: - self.logger.warning( - f"Limiting {n_candidates} by" - f"`max_models_on_disk={max_models_on_disk}`" - f"instead of {nkeep} (set from `nbest={nbest}`)" - ) - nkeep = max_models_on_disk - else: - nkeep = nkeep - # Sort the candidates so that they ordered by best loss, using num_run for tie candidates = sorted(candidates, key=lambda r: (r.loss, r.num_run)) - # If we need to specify how many to keep, keep that many if nkeep is not None: candidates, discarded = cut(candidates, nkeep) all_discarded.update(discarded) self.logger.info(f"Discarding {len(discarded)}/{n_candidates} runs") - # Choose which ones to discard if there's a memory limit - if memory_limit is not None: - largest = max(candidates, key=lambda r: r.mem_usage) - cutoff = memory_limit - largest.mem_usage - - accumulated_mem_usage = accumulate(r.mem_usage for r in candidates) - cutpoint = findwhere(accumulated_mem_usage, lambda mem: mem >= cutoff) - - candidates, discarded = cut(candidates, cutpoint) - - self.logger.warning( - "Limiting num of models via `memory_limit` float" - f" memory_limit={memory_limit}" - f" cutoff={cutoff}" - f" largest={largest.mem_usage}" - f" remaining={len(candidates)}" - f" discarded={len(discarded)}" - ) - all_discarded.update(discarded) - if performance_range_threshold is not None: x = performance_range_threshold worst = dummies[0].loss @@ -760,6 +728,11 @@ def candidates( all_discarded.update(discarded) + # Ensure there's always at least one candidate + if not any(candidates): + sorted_discarded = sorted(all_discarded, key=lambda r: r.loss) + candidates, all_discarded = sorted_discarded[:1], set(sorted_discarded[1:]) + return candidates, all_discarded def fit_ensemble( @@ -821,6 +794,66 @@ def fit_ensemble( self.logger.debug(f"Fitting the ensemble took {duration} seconds.") return ensemble + def requires_deletion( + self, + runs: Sequence[Run], + *, + max_models: int | None = None, + memory_limit: float | None = None, + ) -> tuple[list[Run], set[Run]]: + """Cut a list of runs into those to keep and those to delete + + If neither params are specified, this method should do nothing. + + Parameters + ---------- + runs : Sequence[Run] + The runs to check + + max_models : int | None = None + The maximum amount of models to have on disk. 
Leave `None` for no effect + + memory_limit : float | None = None + The memory limit in MB, leave `None` for no effect + + Returns + ------- + (keep: list[Run], delete: set[Run]) + The list of runs to keep and those to delete + """ + if memory_limit is None and max_models is None: + return list(runs), set() + + # Start with keep all runs and deleteing None + keep = sorted(runs, key=lambda r: (r.loss, r.num_run)) + delete: set[Run] = set() + + if max_models is not None and max_models > len(runs): + keep, to_delete = cut(keep, max_models) + delete.update(to_delete) + + if memory_limit is not None: + largest = max(runs, key=lambda r: r.mem_usage) + cutoff = memory_limit - largest.mem_usage + + accumulated_mem_usage = accumulate(r.mem_usage for r in runs) + + cutpoint = findwhere(accumulated_mem_usage, lambda mem: mem >= cutoff) + keep, to_delete = cut(keep, cutpoint) + + if any(to_delete): + self.logger.warning( + "Limiting num of models via `memory_limit`" + f" memory_limit={memory_limit}" + f" cutoff={cutoff}" + f" largest={largest.mem_usage}" + f" remaining={len(keep)}" + f" discarded={len(to_delete)}" + ) + delete.update(to_delete) + + return keep, delete + def loss(self, run: Run, kind: str = "ensemble") -> float: """Calculate the loss for a list of runs From db322f326fff208cdb3bf8142f026552927fedaa Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 10 Apr 2022 17:47:25 +0200 Subject: [PATCH 064/117] Test candidates --- autosklearn/ensemble_building/builder.py | 48 ++- autosklearn/ensemble_building/run.py | 4 +- autosklearn/util/functional.py | 47 ++- test/fixtures/backend.py | 7 +- test/fixtures/ensemble_building.py | 95 ++++- .../test_ensemble_builder.py | 326 ++++++++++++++++++ .../test_ensemble_builder_mock_data.py | 97 ++---- 7 files changed, 527 insertions(+), 97 deletions(-) create mode 100644 test/test_ensemble_builder/test_ensemble_builder.py diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 87e3837302..28f1d7aa31 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -420,8 +420,12 @@ def main( return self.ensemble_history, self.ensemble_nbest # Calculate the loss for those that require it - requires_update = self.requires_loss_update(runs, limit=self.read_at_most) + requires_update = self.requires_loss_update(runs) + if self.read_at_most: + requires_update = requires_update[: self.read_at_most] + for run in requires_update: + run.record_modified_times() # So we don't count as modified next time run.loss = self.loss(run, kind="ensemble") runs_keep, runs_delete = self.requires_deletion( @@ -578,7 +582,10 @@ def main( return self.ensemble_history, self.ensemble_nbest - def requires_loss_update(self, runs: Sequence[Run], limit: int | None) -> list[Run]: + def requires_loss_update( + self, + runs: Sequence[Run], + ) -> list[Run]: """ Parameters @@ -592,19 +599,15 @@ def requires_loss_update(self, runs: Sequence[Run], limit: int | None) -> list[R The runs that require a loss to be calculated """ queue = [] - for run in runs: - if run.loss is None or run.loss == np.inf: + for run in sorted(runs, key=lambda run: run.recorded_mtimes["ensemble"]): + if run.loss == np.inf: queue.append(run) - elif run.loss is not None and run.was_modified(): + elif run.was_modified(): self.logger.debug(f"{run.id} had its predictions modified?") - run.record_modified_times() # re-mark modfied times queue.append(run) - if limit is not None: - return queue[:limit] - else: - return queue + return queue 
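# Editorial aside -- illustration only, not part of the patch. The queueing
# rule above in miniature: a run needs a loss update when it has no usable loss
# yet or its prediction file changed; `read_at_most` only truncates the
# resulting queue (the patch does that truncation in `main`). `SimpleRun` is a
# stand-in for the real `Run`, and the mtime-based ordering is omitted.
from dataclasses import dataclass

@dataclass
class SimpleRun:
    loss: float = float("inf")
    modified: bool = False

def needs_update(runs, read_at_most=None):
    queue = [r for r in runs if r.loss == float("inf") or r.modified]
    return queue if read_at_most is None else queue[:read_at_most]

runs = [SimpleRun(loss=0.3), SimpleRun(), SimpleRun(loss=0.1, modified=True)]
assert needs_update(runs) == [runs[1], runs[2]]
assert needs_update(runs, read_at_most=1) == [runs[1]]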
def candidates( self, @@ -671,15 +674,17 @@ def candidates( return candidates, all_discarded # Further split the candidates into those that are real and dummies - dummies, real = split(candidates, by=lambda r: r.is_dummy()) - dummies = sorted(dummies, key=lambda r: r.loss) - dummy_cutoff = dummies[0].loss + dummies, candidates = split(candidates, by=lambda r: r.is_dummy()) + n_real = len(candidates) if len(dummies) == 0: self.logger.error("Expected at least one dummy run") - raise RuntimeError("Expected at least one dummy run") + raise ValueError("Expected at least one dummy run") - if len(real) == 0: + dummies = sorted(dummies, key=lambda r: r.loss) + dummy_cutoff = dummies[0].loss + + if len(candidates) == 0: self.logger.warning("No real runs, using dummies as candidates") candidates = dummies return candidates, all_discarded @@ -687,16 +692,19 @@ def candidates( if better_than_dummy: self.logger.debug(f"Using {dummy_cutoff} to filter candidates") - candidates, discarded = split(real, by=lambda r: r.loss < dummy_cutoff) + candidates, discarded = split( + candidates, + by=lambda r: r.loss < dummy_cutoff, + ) all_discarded.update(discarded) # If there are no real candidates left, use the dummies if len(candidates) == 0: candidates = dummies - if len(real) > 0: + if n_real > 0: self.logger.warning( "No models better than random - using Dummy loss!" - f"\n\tNumber of models besides current dummy model: {len(real)}" + f"\n\tNumber of models besides current dummy model: {n_real}" f"\n\tNumber of dummy models: {len(dummies)}", ) @@ -706,8 +714,10 @@ def candidates( nkeep: int | None if isinstance(nbest, float): nkeep = int(bound(n_candidates * nbest, bounds=(1, n_candidates))) + elif isinstance(nbest, int): + nkeep = int(bound(nbest, bounds=(1, n_candidates))) else: - nkeep = nbest + nkeep = None # Sort the candidates so that they ordered by best loss, using num_run for tie candidates = sorted(candidates, key=lambda r: (r.loss, r.num_run)) diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index 150e9df1dd..3297205b45 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -61,7 +61,6 @@ def was_modified(self) -> bool: """Query for when the ens file was last modified""" recorded = self.recorded_mtimes.get("ensemble") last = self.pred_path().stat().st_mtime - print(recorded, last) return recorded != last def pred_path(self, kind: str = "ensemble") -> Path: @@ -134,5 +133,8 @@ def id(self) -> RunID: def __hash__(self) -> int: return hash(self.id) + def __repr__(self) -> str: + return f"Run(id={self.id}, loss={self.loss})" + def __eq__(self, other: object) -> bool: return isinstance(other, Run) and other.id == self.id diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index 8609539871..1e1a94e207 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -3,6 +3,7 @@ from typing import Callable, Iterable, TypeVar from functools import reduce +from itertools import tee, chain import numpy as np @@ -87,7 +88,7 @@ def intersection(*items: Iterable[T]) -> set[T]: def cut( - lst: Iterable[T], + itr: Iterable[T], where: int | Callable[[T], bool], ) -> tuple[list[T], list[T]]: """Cut a list in two at a given index or predicate @@ -106,17 +107,18 @@ def cut( The split items """ if isinstance(where, int): - lst = list(lst) + lst = list(itr) return lst[:where], lst[where:] else: a = [] - itr = iter(lst) - for x in itr: + itr2 = iter(itr) + for x in itr2: if not where(x): 
a.append(x) + else: break - return a, [x] + list(itr) + return a, [x] + list(itr2) def split( @@ -197,3 +199,38 @@ def findwhere(itr: Iterable[T], func: Callable[[T], bool], *, default: int = -1) The first index where func was True """ return next((i for i, t in enumerate(itr) if func(t)), default) + + +def pairs(itr: Iterable[T]) -> Iterable[tuple[T, T]]: + """An iterator over pairs of items in the iterator + + ..code:: python + + # Check if sorted + if all(a < b for a, b in pairs(items)): + ... + + Parameters + ---------- + itr : Iterable[T] + An itr of items + + Returns + ------- + Iterable[tuple[T, T]] + An itr of sequential pairs of the items + """ + itr1, itr2 = tee(itr) + + # Skip first item + _ = next(itr2) + + # Check there is a second element + peek = next(itr2, None) + if peek is None: + raise ValueError("Can't create a pair from iterable with 1 item") + + # Put it back in + itr2 = chain([peek], itr2) + + return iter((a, b) for a, b in zip(itr1, itr2)) diff --git a/test/fixtures/backend.py b/test/fixtures/backend.py index 59a1d99f0f..cf88730f86 100644 --- a/test/fixtures/backend.py +++ b/test/fixtures/backend.py @@ -32,7 +32,7 @@ def tmp_dir(tmp_path: Path) -> str: @fixture -def make_backend() -> Callable[..., Backend]: +def make_backend(tmp_path: Path) -> Callable[..., Backend]: """Make a backend Parameters @@ -50,9 +50,12 @@ def make_backend() -> Callable[..., Backend]: """ # TODO redo once things use paths def _make( - path: str | Path, + path: str | Path | None = None, template: Path | Backend | None = None, ) -> Backend: + if path is None: + path = tmp_path / "backend" + _path = Path(path) if not isinstance(path, Path) else path assert not _path.exists(), "Try passing path / 'backend'" diff --git a/test/fixtures/ensemble_building.py b/test/fixtures/ensemble_building.py index af6bf2d865..c0471da600 100644 --- a/test/fixtures/ensemble_building.py +++ b/test/fixtures/ensemble_building.py @@ -1,13 +1,19 @@ from __future__ import annotations -from typing import Callable +from typing import Any, Callable +import sys import math +import pickle from pathlib import Path import numpy as np -from autosklearn.ensemble_building import Run +from autosklearn.automl_common.common.utils.backend import Backend +from autosklearn.constants import BINARY_CLASSIFICATION +from autosklearn.data.xy_data_manager import XYDataManager +from autosklearn.ensemble_building import EnsembleBuilder, Run +from autosklearn.metrics import Scorer, accuracy from pytest_cases import fixture @@ -17,27 +23,46 @@ @fixture def make_run(tmp_path: Path) -> Callable[..., Run]: def _make( - id: int | None = 2, + id: int | None = None, + dummy: bool = False, + backend: Backend | None = None, seed: int = DEFAULT_SEED, + modified: bool = False, budget: float = 0.0, loss: float | None = None, model_size: int | None = None, - predictions: list[str] | dict[str, np.ndarray] | None = None, + mem_usage: float | None = None, + predictions: str | list[str] | dict[str, np.ndarray] | None = "ensemble", ) -> Run: + if dummy: + assert id is None + id = 1 + loss = loss if loss is not None else 50_000 + + if id is None: + id = np.random.randint(sys.maxsize) + model_id = f"{seed}_{id}_{budget}" - dir = tmp_path / model_id + + # Use this backend to set things up + if backend is not None: + runsdir = Path(backend.get_runs_directory()) + else: + runsdir = tmp_path + + dir = runsdir / model_id if not dir.exists(): dir.mkdir() # Populate if None - if predictions is None: - predictions = ["ensemble", "valid", "test"] + if 
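# Editorial aside -- illustration only, not part of the patch. Intended
# behaviour of the helpers reworked above, shown on plain ints and assuming the
# patched `autosklearn.util.functional` is importable: `cut` splits at an index
# or at the first element matching the predicate, and `pairs` yields
# consecutive pairs (it raises on a single-element iterable).
from autosklearn.util.functional import cut, pairs

assert cut([1, 2, 3, 4, 5], 2) == ([1, 2], [3, 4, 5])
assert cut([1, 2, 3, 4, 5], lambda x: x >= 3) == ([1, 2], [3, 4, 5])
assert list(pairs([1, 2, 3])) == [(1, 2), (2, 3)]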
isinstance(predictions, str): + predictions = [predictions] # Convert to dict if isinstance(predictions, list): - dummy = np.asarray([[0]]) - predictions = {kind: dummy for kind in predictions} + preds = np.asarray([[0]]) + predictions = {kind: preds for kind in predictions} # Write them if isinstance(predictions, dict): @@ -48,9 +73,17 @@ def _make( run = Run(dir) + if modified: + assert predictions is not None, "Can only modify if predictions" + for k, v in run.recorded_mtimes.items(): + run.recorded_mtimes[k] = v + 1e-4 + if loss is not None: run.loss = loss + if mem_usage is not None: + run._mem_usage = mem_usage + # MB if model_size is not None: n_bytes = int(model_size * math.pow(1024, 2)) @@ -61,3 +94,47 @@ def _make( return run return _make + + +@fixture +def make_ensemble_builder( + make_backend: Callable[..., Backend], + make_sklearn_dataset: Callable[..., XYDataManager], +) -> Callable[..., EnsembleBuilder]: + def _make( + *, + previous_candidates: list[Run] | None = None, + backend: Backend | None = None, + dataset_name: str = "TEST", + task_type: int = BINARY_CLASSIFICATION, + metric: Scorer = accuracy, + **kwargs: Any, + ) -> EnsembleBuilder: + + if backend is None: + backend = make_backend() + + if not Path(backend._get_datamanager_pickle_filename()).exists(): + datamanager = make_sklearn_dataset( + name="breast_cancer", + task=BINARY_CLASSIFICATION, + feat_type="numerical", # They're all numerical + as_datamanager=True, + ) + backend.save_datamanager(datamanager) + + builder = EnsembleBuilder( + backend=backend, + dataset_name=dataset_name, + task_type=task_type, + metric=metric, + **kwargs, + ) + + if previous_candidates is not None: + with builder.previous_candidates_path.open("wb") as f: + pickle.dump({run.id: run for run in previous_candidates}, f) + + return builder + + return _make diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py new file mode 100644 index 0000000000..33e127f12a --- /dev/null +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -0,0 +1,326 @@ +from __future__ import annotations + +from typing import Callable + +from pathlib import Path + +import numpy as np + +from autosklearn.automl_common.common.utils.backend import Backend +from autosklearn.constants import BINARY_CLASSIFICATION +from autosklearn.data.xy_data_manager import XYDataManager +from autosklearn.ensemble_building import EnsembleBuilder, Run +from autosklearn.metrics import roc_auc +from autosklearn.util.functional import bound, pairs + +import pytest +from pytest_cases import fixture, parametrize +from unittest.mock import patch + +from test.conftest import DEFAULT_SEED + + +@fixture +def builder(make_ensemble_builder: Callable[..., EnsembleBuilder]) -> EnsembleBuilder: + return make_ensemble_builder() + + +def test_available_runs(builder: EnsembleBuilder) -> None: + """ + Expects + ------- + * Should be able to read runs from the backends rundir where runs are tagged + {seed}_{numrun}_{budget} + """ + runsdir = Path(builder.backend.get_runs_directory()) + + ids = {(0, i, 0.0) for i in range(1, 10)} + paths = [runsdir / f"{s}_{n}_{b}" for s, n, b in ids] + + for path in paths: + path.mkdir() + + available_runs = builder.available_runs() + + assert len(available_runs) == len(ids) + for run_id in available_runs.keys(): + assert run_id in ids + + +def test_requires_loss_update_with_modified_runs( + builder: EnsembleBuilder, + make_run: Callable[..., Run], +) -> None: + """ + Expects + ------- + * Should include 
runs that were modified, even if they have a loss + """ + run_okay = [make_run(id=42, loss=1) for _ in range(5)] + run_modified = [make_run(id=13, loss=1, modified=True) for _ in range(5)] + + runs = run_okay + run_modified + + requires_update = builder.requires_loss_update(runs) + + print(run_modified) + print(requires_update) + assert set(run_modified) == set(requires_update) + + +def test_requires_loss_update_with_no_loss( + builder: EnsembleBuilder, + make_run: Callable[..., Run], +) -> None: + """ + Expects + ------- + * Should include runs that have no loss + """ + run_okay = [make_run(loss=10) for _ in range(5)] + run_no_loss = [make_run() for _ in range(5)] + + runs = run_okay + run_no_loss + + requires_update = builder.requires_loss_update(runs) + + assert set(run_no_loss) == set(requires_update) + + +def test_candidates_no_filters( + builder: EnsembleBuilder, make_run: Callable[..., Run] +) -> None: + """ + Expects + ------- + * Should not filter out any viable runs if no filters set. Here a viable run + has a loss and ensemble predictions + """ + dummy = make_run(dummy=True) + runs = [make_run(loss=n) for n in range(10)] + + candidates, discarded = builder.candidates( + runs + [dummy], + better_than_dummy=False, + nbest=None, + performance_range_threshold=None, + ) + + assert len(candidates) == len(runs) + assert len(discarded) == 0 + + +def test_candidates_filters_runs_with_no_predictions( + builder: EnsembleBuilder, make_run: Callable[..., Run] +) -> None: + """ + Expects + ------- + * Should filter out runs with no "ensemble" predictions + """ + bad_runs = [make_run(predictions=None) for _ in range(5)] + dummy = make_run(dummy=True, loss=2) + good_run = make_run(predictions="ensemble", loss=1) + + runs = bad_runs + [dummy] + [good_run] + + candidates, discarded = builder.candidates(runs) + + assert len(candidates) == 1 + assert len(discarded) == len(bad_runs) + assert candidates[0].pred_path("ensemble").exists() + + +def test_candidates_filters_runs_with_no_loss( + builder: EnsembleBuilder, make_run: Callable[..., Run] +) -> None: + """ + Expects + ------- + * Should filter out runs with no loss + """ + bad_runs = [make_run(loss=None) for _ in range(5)] + dummy_run = make_run(dummy=True, loss=2) + good_run = make_run(loss=1) + + runs = bad_runs + [dummy_run] + [good_run] + + candidates, discarded = builder.candidates(runs) + + assert len(candidates) == 1 + assert len(discarded) == len(bad_runs) + assert candidates[0].loss == 1 + + +def test_candidates_filters_out_better_than_dummy( + builder: EnsembleBuilder, + make_run: Callable[..., Run], +) -> None: + """ + Expects + ------- + * Should filter out runs worse than dummy + """ + bad_runs = [make_run(loss=1) for _ in range(2)] + dummy_run = make_run(dummy=True, loss=0) + good_runs = [make_run(loss=-1) for _ in range(3)] + + runs = bad_runs + [dummy_run] + good_runs + + candidates, discarded = builder.candidates(runs, better_than_dummy=True) + + assert len(candidates) == 3 + assert all(run.loss < dummy_run.loss for run in candidates) + + assert len(discarded) == 2 + assert all(run.loss >= dummy_run.loss for run in discarded) + + +def test_candidates_expects_dummy( + builder: EnsembleBuilder, + make_run: Callable[..., Run], +) -> None: + """ + Expects + ------- + * Should raise if not given a dummy run + """ + runs = [make_run(dummy=False, loss=1) for _ in range(5)] + with pytest.raises(ValueError): + builder.candidates(runs) + + +def test_candidates_uses_dummy_if_no_real( + builder: EnsembleBuilder, + make_run: Callable[..., 
Run], +) -> None: + """ + Expects + ------- + * Should use dummy runs if no real candidates exist + """ + runs = [make_run(dummy=True, loss=1) for _ in range(5)] + candidates, discarded = builder.candidates(runs) + + assert len(discarded) == 0 + assert all(run.is_dummy() for run in candidates) + + +def test_candidates_uses_dummy_if_no_candidates_better( + builder: EnsembleBuilder, + make_run: Callable[..., Run], +) -> None: + """ + Expects + ------- + * If no run is better than a dummy run, the candidates will then consist + of the dummy runs. + """ + bad_runs = [make_run(loss=10) for _ in range(10)] + dummies = [make_run(dummy=True, loss=0) for _ in range(2)] + + runs = bad_runs + dummies + + candidates, discarded = builder.candidates(runs, better_than_dummy=True) + + assert len(candidates) == 2 + assert all(run.is_dummy() for run in candidates) + + +@parametrize("nbest", [0, 1, 5, 1000]) +def test_candidates_nbest_int( + builder: EnsembleBuilder, + make_run: Callable[..., Run], + nbest: int, +) -> None: + """ + Expects + ------- + * Should only select the nbest candidates + * They should be ordered by loss + """ + n = 10 + expected = int(bound(nbest, bounds=(1, n))) + + dummy = make_run(dummy=True) + runs = [make_run(loss=i) for i in range(n)] + [dummy] + candidates, discarded = builder.candidates(runs, nbest=nbest) + + assert len(candidates) == expected + + if len(candidates) > 1: + assert all(a.loss <= b.loss for a, b in pairs(candidates)) + + if any(discarded): + worst_candidate = candidates[-1] + assert all(worst_candidate.loss <= d.loss for d in discarded) + + +@parametrize("nbest", [0.0, 0.25, 0.5, 1.0]) +def test_candidates_nbest_float( + builder: EnsembleBuilder, + make_run: Callable[..., Run], + nbest: float, +) -> None: + """ + Expects + ------- + * Should select nbest percentage of candidates + * They should be ordered by loss + """ + n = 10 + expected = int(bound(nbest * n, bounds=(1, n))) + + dummy = make_run(dummy=True, loss=0) + runs = [make_run(id=i, loss=i) for i in range(2, n + 2)] + [dummy] + candidates, discarded = builder.candidates(runs, nbest=nbest) + + assert len(candidates) == expected + + if len(candidates) > 1: + assert all(a.loss <= b.loss for a, b in pairs(candidates)) + + if any(discarded): + worst_candidate = candidates[-1] + assert all(worst_candidate.loss <= d.loss for d in discarded) + + +@parametrize("threshold", [0.0, 0.25, 0.5, 1.0]) +def test_candidates_performance_range_threshold( + builder: EnsembleBuilder, + make_run: Callable[..., Run], + threshold: float, +) -> None: + """ + Expects + ------- + * Should select runs that are `threshold` between the dummy loss and the best loss + This value is captured in `boundary`. 
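# Editorial aside -- illustration only, not part of the patch. How the nbest
# tests above read the parameter: an int is an absolute count, a float a
# fraction of the candidates, and at least one (and at most all) candidates are
# always kept. A plain-number sketch of that rule:
def n_keep(n_candidates, nbest):
    n = int(n_candidates * nbest) if isinstance(nbest, float) else nbest
    return max(1, min(n, n_candidates))

assert n_keep(10, 5) == 5
assert n_keep(10, 1000) == 10
assert n_keep(10, 0.25) == 2
assert n_keep(10, 0.0) == 1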
+ """ + worst_loss = 100 + best_loss = 0 + dummy_loss = 50 + + boundary = threshold * best_loss + (1 - threshold) * dummy_loss + print("boundary", boundary) + + dummy = make_run(dummy=True, loss=dummy_loss) + runs = [make_run(loss=loss) for loss in np.linspace(best_loss, worst_loss, 101)] + runs += [dummy] + + candidates, discarded = builder.candidates( + runs, + performance_range_threshold=threshold, + ) + + # When no run is better than threshold, we just get 1 candidate, + # Make sure it's the best + if len(candidates) == 1: + assert all(r.loss >= candidates[0].loss for r in discarded) + + else: + for run in candidates: + assert run.loss < boundary + + for run in discarded: + assert run.loss >= boundary diff --git a/test/test_ensemble_builder/test_ensemble_builder_mock_data.py b/test/test_ensemble_builder/test_ensemble_builder_mock_data.py index fd873f6ae4..18d0643b05 100644 --- a/test/test_ensemble_builder/test_ensemble_builder_mock_data.py +++ b/test/test_ensemble_builder/test_ensemble_builder_mock_data.py @@ -10,13 +10,7 @@ from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.data.xy_data_manager import XYDataManager -from autosklearn.ensemble_building.builder import ( - Y_ENSEMBLE, - Y_TEST, - Y_VALID, - EnsembleBuilder, - Run, -) +from autosklearn.ensemble_building import EnsembleBuilder, Run from autosklearn.metrics import roc_auc from pytest_cases import fixture, parametrize @@ -25,21 +19,20 @@ from test.conftest import DEFAULT_SEED -@fixture -def dummy_backend( - tmp_path: Path, - make_sklearn_dataset: Callable[..., XYDataManager], - make_backend: Callable[..., Backend], -) -> Backend: - datamanager = make_sklearn_dataset( - name="breast_cancer", - task=BINARY_CLASSIFICATION, - feat_type="numerical", # They're all numerical - as_datamanager=True, - ) - backend = make_backend(path=tmp_path / "backend") - backend.save_datamanager(datamanager) - return backend +def test_available_runs(make_ensemble_builder: Callable[..., EnsembleBuilder]) -> None: + builder = make_ensemble_builder() + runsdir = Path(builder.backend.get_runs_directory()) + + ids = {(0, i, 0.0) for i in range(1, 10)} + paths = [runsdir / f"{s}_{n}_{b}" for s, n, b in ids] + + for path in paths: + path.mkdir() + + available_runs = builder.available_runs() + + for run_id in available_runs.keys(): + assert run_id in ids @parametrize("n_models", [20, 50]) @@ -47,13 +40,15 @@ def dummy_backend( @parametrize("mem_largest_mult", [1, 2, 10]) @parametrize("n_expected", [1, 3, 5, 10]) @parametrize("largest_is_best", [True, False]) -def test_max_models_on_disc_with_float_selects_expected_models( +def test_candidates_memory_limit( n_models: int, mem_model: int, mem_largest_mult: int, n_expected: int, largest_is_best: bool, - dummy_backend: Backend, + backend: Backend, + make_ensemble_builder: Callable[..., EnsembleBuilder], + make_run: Callable[..., Run], ) -> None: """ Parameters @@ -75,8 +70,8 @@ def test_max_models_on_disc_with_float_selects_expected_models( Fixtures -------- - dummy_backend: Backend - Just a backend that's valid, contents don't matter for this test + make_ensemble_builder: Callable[..., EnsembleBuilder] + make_run: Callable[..., Run] Note ---- @@ -94,56 +89,36 @@ def test_max_models_on_disc_with_float_selects_expected_models( * The ensemble builder should select the expected number of models given the calculated `max_models_on_disc`. 
""" - - # These are arranged so the last one is best, with the lose loss runs = [ - Run( - seed=DEFAULT_SEED, - num_run=n, - budget=0.0, - loss=10 * -n, - loaded=1, - mem_usage=mem_model, - ens_file=f"pred{n}", - ) + make_run(id=n, loss=10 * n, mem_usage=mem_model, backend=backend) for n in range(1, n_models + 1) ] mem_largest = mem_model * mem_largest_mult if largest_is_best: - runs[-1].mem_usage = mem_largest + runs[-1]._mem_usage = mem_largest else: - runs[0].mem_usage = mem_largest + runs[0]._mem_usage = mem_largest nbest = sorted(runs, key=lambda run: run.loss)[:n_expected] mem_for_nbest = sum(run.mem_usage for run in nbest) + model_memory_limit = float(mem_for_nbest + mem_largest) # type: ignore - slack = mem_largest # Slack introduced is the size of the largest model - max_models_on_disc = float(mem_for_nbest + slack) # type: ignore - print(max_models_on_disc) - - ensbuilder = EnsembleBuilder( - backend=dummy_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - max_models_on_disc=max_models_on_disc, - memory_limit=None, + builder = make_ensemble_builder( + max_models_on_disc=model_memory_limit, + backend=backend, ) - # Enter the models, with each model being progressibly better - ensbuilder._runs = {f"pred{i}": run for i, run in enumerate(runs, start=1)} - ensbuilder._run_predictions = { - f"pred{n}": {Y_ENSEMBLE: np.array([1])} for n in range(1, n_models + 1) - } - - sel_keys = ensbuilder.get_n_best_preds() + candidates, discarded = builder.candidates( + runs, + model_memory_limit=model_memory_limit, + ) - # The last expected_to_save models should be saved, the range iters backwards - expected = [f"pred{n}" for n in range(n_models, n_models - n_expected, -1)] + # We expect to save the first n runs as those are the ones with thel lowest loss + expected = runs[:n_expected] - assert len(sel_keys) == len(expected) and sel_keys == expected + assert expected == candidates + assert set(runs) - set(candidates) == set(discarded) @parametrize("n_models", [50, 10, 2, 1]) From f8b5b3580667f7447ae62c5afd4586f6d732401c Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 10 Apr 2022 17:52:05 +0200 Subject: [PATCH 065/117] Made delete it's own function --- autosklearn/ensemble_building/builder.py | 32 ++++++++++++++++-------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 28f1d7aa31..4a91a1b249 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Sequence, cast +from typing import Any, Iterable, Sequence, cast import logging.handlers import multiprocessing @@ -428,7 +428,7 @@ def main( run.record_modified_times() # So we don't count as modified next time run.loss = self.loss(run, kind="ensemble") - runs_keep, runs_delete = self.requires_deletion( + runs_keep, runs_to_delete = self.requires_deletion( runs, max_models=self.max_models_on_disk, memory_limit=self.model_memory_limit, @@ -499,14 +499,8 @@ def main( pickle.dump({run.id: run for run in candidates}, f) # Delete files for models which were not considered candidates - if any(runs_delete): - for run in runs_delete: - if not run.is_dummy(): - try: - shutil.rmtree(run.dir) - self.logger.info(f"Deleted files for {run}") - except Exception as e: - self.logger.error(f"Failed to delete files for {run}: \n{e}") + if 
any(runs_to_delete): + self.delete_runs(runs_to_delete) # If there was any change from the previous run, either in terms of # runs or one of those runs had its loss updated, then we need to @@ -895,3 +889,21 @@ def loss(self, run: Run, kind: str = "ensemble") -> float: loss = np.inf finally: return loss + + def delete_runs(self, runs: Iterable[Run]) -> None: + """Delete runs + + Will not delete dummy runs + + Parameters + ---------- + runs : Sequence[Run] + The runs to delete + """ + real_runs = iter(run for run in runs if not run.is_dummy()) + for run in real_runs: + try: + shutil.rmtree(run.dir) + self.logger.info(f"Deleted files for {run}") + except Exception as e: + self.logger.error(f"Failed to delete files for {run}: \n{e}") From f12625185e3768914647985034abde7304ca74f1 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 10 Apr 2022 20:06:46 +0200 Subject: [PATCH 066/117] Further simplifications --- autosklearn/ensemble_building/builder.py | 203 ++++++++++++----------- 1 file changed, 104 insertions(+), 99 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 4a91a1b249..b73185cbe8 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -23,7 +23,7 @@ from autosklearn.ensemble_building.run import Run, RunID from autosklearn.ensembles.ensemble_selection import EnsembleSelection from autosklearn.metrics import Scorer, calculate_loss, calculate_score -from autosklearn.util.functional import bound, cut, findwhere, intersection, split +from autosklearn.util.functional import cut, findwhere, split from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules @@ -428,67 +428,79 @@ def main( run.record_modified_times() # So we don't count as modified next time run.loss = self.loss(run, kind="ensemble") - runs_keep, runs_to_delete = self.requires_deletion( - runs, - max_models=self.max_models_on_disk, - memory_limit=self.model_memory_limit, - ) + # Get the dummy and real runs + dummies, candidates = split(runs, by=lambda r: r.is_dummy()) - candidates, all_discarded = self.candidates( - runs_keep, - better_than_dummy=True, - nbest=self.ensemble_nbest, - performance_range_threshold=self.performance_range_threshold, - ) + # We see if we need to delete any of the real runs before we waste compute + # on evaluating their candidacy for ensemble building + if any(candidates): + candidates, to_delete = self.requires_deletion( + candidates, + max_models=self.max_models_on_disk, + memory_limit=self.model_memory_limit, + ) - if len(candidates) == 0: - self.logger.debug("No viable candidates found for ensemble building") - return self.ensemble_history, self.ensemble_nbest + # If there are no candidates left, we just keep the best one + if not any(candidates): + best = sorted(to_delete, key=lambda r: (r.loss, r.num_run))[0] + candidates = [best] + to_delete.remove(best) + + self.delete_runs(to_delete) + + # If there are any candidates, perform candidates selection + if any(candidates): + candidates, to_delete = self.candidate_selection( + runs=candidates, + dummies=dummies, + better_than_dummy=True, + nbest=self.ensemble_nbest, + performance_range_threshold=self.performance_range_threshold, + ) + self.delete_runs(to_delete) + else: + candidates = dummies + self.logger.warning("No real runs to build ensemble from") # Get a set representation of them as we will begin doing intersections # Not here that valid_set and test_set are both subsets 
of candidates_set + # ... then find intersect and use that to fit the ensemble candidates_set = set(candidates) - valid_set = {r for r in candidates if r.pred_path("valid").exists()} - test_set = {r for r in candidates if r.pred_path("test").exists()} + valid_subset = {r for r in candidates if r.pred_path("valid").exists()} + test_subset = {r for r in candidates if r.pred_path("test").exists()} - if len(valid_set & test_set) == 0 and len(test_set) > 0 and len(valid_set) > 0: + intersect = valid_subset & test_subset + if len(intersect) == 0 and len(test_subset) > 0 and len(valid_subset) > 0: self.logger.error("valid_set and test_set not empty but do not overlap") return self.ensemble_history, self.ensemble_nbest - # Find the intersect between the most groups and use that to fit the ensemble - intersect = intersection(candidates_set, valid_set, test_set) + # Try to use the runs which have the most kinds of preds, otherwise just use all if len(intersect) > 0: - candidates = list(intersect) - candidates = sorted(candidates, key=lambda r: r.id) - + candidates = sorted(intersect, key=lambda r: r.id) valid_models = candidates test_models = candidates - elif len(candidates_set & valid_set) > 0: - intersect = candidates_set & valid_set - candidates, discarded = split(candidates, by=lambda r: r in intersect) - candidates = sorted(candidates, key=lambda r: r.id) + self.delete_runs(candidates_set - intersect) + elif len(valid_subset) > 0: + candidates = sorted(valid_subset, key=lambda r: r.id) valid_models = candidates test_models = [] - elif len(candidates_set & test_set) > 0: - intersect = candidates_set & test_set - candidates, discarded = split(candidates, by=lambda r: r in intersect) - candidates = sorted(candidates, key=lambda r: r.id) + self.delete_runs(candidates_set - valid_subset) + elif len(test_subset) > 0: + candidates = sorted(test_subset, key=lambda r: r.id) valid_models = [] test_models = candidates - else: - candidates = sorted(candidates, key=lambda r: r.id) - discarded = [] + self.delete_runs(candidates_set - test_subset) + else: + candidates = sorted(candidates_set, key=lambda r: r.id) valid_models = [] test_models = [] - all_discarded.update(discarded) - # To save on pickle and to allow for fresh predictions, unload the cache # before pickling for run in candidates: @@ -498,20 +510,16 @@ def main( with self.previous_candidates_path.open("wb") as f: pickle.dump({run.id: run for run in candidates}, f) - # Delete files for models which were not considered candidates - if any(runs_to_delete): - self.delete_runs(runs_to_delete) - # If there was any change from the previous run, either in terms of # runs or one of those runs had its loss updated, then we need to # fit the ensemble builder - previous_candidate_ids = set(previous_candidates.keys()) + previous_candidate_ids = set(previous_candidates) current_candidate_ids = set(run.id for run in candidates) - different_candidates = previous_candidate_ids ^ current_candidate_ids + difference = previous_candidate_ids ^ current_candidate_ids updated_candidates = iter(run in candidates for run in requires_update) - if not any(different_candidates) or any(updated_candidates): + if not any(difference) or any(updated_candidates): self.logger.info("All ensemble candidates the same, no update required") return self.ensemble_history, self.ensemble_nbest @@ -603,26 +611,30 @@ def requires_loss_update( return queue - def candidates( + def candidate_selection( self, runs: Sequence[Run], + dummies: Run | list[Run], *, better_than_dummy: bool = False, 
nbest: int | float | None = None, performance_range_threshold: float | None = None, ) -> tuple[list[Run], set[Run]]: - """Get a list of candidates from `runs` + """Get a list of candidates from `runs`, garuanteeing at least one Applies a set of reductions in order of parameters to reach a set of final candidates. - Expects at least one `dummy` run in `runs`. + Expects at least one `dummies` run. Parameters ---------- runs : Sequence[Run] The runs to evaluate candidates from. + dummies: Run | Sequence[Run] + The dummy run to base from + better_than_dummy: bool = False Whether the run must be better than the best dummy run to be a candidate. In the case where there are no candidates left, the dummies will then be @@ -645,45 +657,40 @@ def candidates( (candidates: list[Run], discarded: set[Run]) A tuple of runs that are candidates and also those that didn't make it """ + if isinstance(dummies, Run): + dummies = [dummies] + + assert len(dummies) > 0 and len(runs) > 0, "At least 1 real run and dummy run" + all_discarded: set[Run] = set() # We filter out all runs that don't have any predictions for the ensemble - has_predictions = lambda run: run.pred_path("ensemble").exists() - candidates, discarded = split(runs, by=has_predictions) + candidates, discarded = split( + runs, by=lambda run: run.pred_path("ensemble").exists() + ) all_discarded.update(discarded) if len(candidates) == 0: - self.logger.debug("No runs with predictions on ensemble data set") - return candidates, all_discarded + self.logger.debug("No runs with predictions on ensemble set, using dummies") + return dummies, all_discarded - if len(discarded) > 0: - self.logger.warning(f"Have no ensemble predictions for {discarded}") + for run in discarded: + self.logger.warning(f"Have no ensemble predictions for {run}") # Get all the ones that have a tangible loss - candidates, discarded = split(candidates, by=lambda r: r.loss < np.inf) + candidates, discarded = split( + candidates, + by=lambda r: r.loss < np.inf, + ) all_discarded.update(discarded) if len(candidates) == 0: - self.logger.debug("No runs with a usable loss") - return candidates, all_discarded - - # Further split the candidates into those that are real and dummies - dummies, candidates = split(candidates, by=lambda r: r.is_dummy()) - n_real = len(candidates) - - if len(dummies) == 0: - self.logger.error("Expected at least one dummy run") - raise ValueError("Expected at least one dummy run") - - dummies = sorted(dummies, key=lambda r: r.loss) - dummy_cutoff = dummies[0].loss - - if len(candidates) == 0: - self.logger.warning("No real runs, using dummies as candidates") - candidates = dummies - return candidates, all_discarded + self.logger.debug("No runs with a usable loss, using dummies") + return dummies, all_discarded if better_than_dummy: + dummies = sorted(dummies, key=lambda r: r.loss) + dummy_cutoff = dummies[0].loss self.logger.debug(f"Using {dummy_cutoff} to filter candidates") candidates, discarded = split( @@ -694,32 +701,30 @@ def candidates( # If there are no real candidates left, use the dummies if len(candidates) == 0: - candidates = dummies - if n_real > 0: - self.logger.warning( - "No models better than random - using Dummy loss!" 
- f"\n\tNumber of models besides current dummy model: {n_real}" - f"\n\tNumber of dummy models: {len(dummies)}", - ) - - n_candidates = len(candidates) - - # Decide how many instanceto keep - nkeep: int | None - if isinstance(nbest, float): - nkeep = int(bound(n_candidates * nbest, bounds=(1, n_candidates))) - elif isinstance(nbest, int): - nkeep = int(bound(nbest, bounds=(1, n_candidates))) - else: - nkeep = None + self.logger.warning( + "No models better than random - using Dummy loss!" + f"\n\tModels besides current dummy model: {len(candidates)}" + f"\n\tDummy models: {len(dummies)}", + ) + return dummies, all_discarded # Sort the candidates so that they ordered by best loss, using num_run for tie candidates = sorted(candidates, key=lambda r: (r.loss, r.num_run)) - if nkeep is not None: + if nbest is not None: + # Determine how many to keep, always keeping one + if isinstance(nbest, float): + nkeep = int(len(candidates) * nbest) + candidates, discarded = cut(candidates, nkeep) + self.logger.info(f"Discarding {len(discarded)}/{len(candidates)} runs") + + # Always preserve at least one, the best + if len(candidates) == 0: + candidates, discared = cut(discarded, 1) + self.logger.warning("nbest too aggresive, using best") + all_discarded.update(discarded) - self.logger.info(f"Discarding {len(discarded)}/{n_candidates} runs") if performance_range_threshold is not None: x = performance_range_threshold @@ -730,12 +735,12 @@ def candidates( candidates, discarded = cut(candidates, where=lambda r: r.loss >= cutoff) - all_discarded.update(discarded) + # Always preserve at least one, the best + if len(candidates) == 0: + candidates, discared = cut(discarded, 1) + self.logger.warning("No models in performance range, using best") - # Ensure there's always at least one candidate - if not any(candidates): - sorted_discarded = sorted(all_discarded, key=lambda r: r.loss) - candidates, all_discarded = sorted_discarded[:1], set(sorted_discarded[1:]) + all_discarded.update(discarded) return candidates, all_discarded @@ -828,7 +833,7 @@ def requires_deletion( if memory_limit is None and max_models is None: return list(runs), set() - # Start with keep all runs and deleteing None + # Start with keep all runs and dummies, deleteing None keep = sorted(runs, key=lambda r: (r.loss, r.num_run)) delete: set[Run] = set() @@ -900,8 +905,8 @@ def delete_runs(self, runs: Iterable[Run]) -> None: runs : Sequence[Run] The runs to delete """ - real_runs = iter(run for run in runs if not run.is_dummy()) - for run in real_runs: + items = iter(run for run in runs if not run.is_dummy() and run.dir.exists()) + for run in items: try: shutil.rmtree(run.dir) self.logger.info(f"Deleted files for {run}") From 2e116e01fb7075be2b1736f0e2e993a267ab47b4 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Sun, 10 Apr 2022 20:40:44 +0200 Subject: [PATCH 067/117] Fixup test with simplification --- autosklearn/ensemble_building/builder.py | 2 + .../test_ensemble_builder.py | 107 ++++++++---------- 2 files changed, 48 insertions(+), 61 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index b73185cbe8..3bb86b53e3 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -715,6 +715,8 @@ def candidate_selection( # Determine how many to keep, always keeping one if isinstance(nbest, float): nkeep = int(len(candidates) * nbest) + else: + nkeep = nbest candidates, discarded = cut(candidates, nkeep) self.logger.info(f"Discarding 
{len(discarded)}/{len(candidates)} runs") diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 33e127f12a..32178d2cb1 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -6,18 +6,10 @@ import numpy as np -from autosklearn.automl_common.common.utils.backend import Backend -from autosklearn.constants import BINARY_CLASSIFICATION -from autosklearn.data.xy_data_manager import XYDataManager from autosklearn.ensemble_building import EnsembleBuilder, Run -from autosklearn.metrics import roc_auc from autosklearn.util.functional import bound, pairs -import pytest from pytest_cases import fixture, parametrize -from unittest.mock import patch - -from test.conftest import DEFAULT_SEED @fixture @@ -56,15 +48,13 @@ def test_requires_loss_update_with_modified_runs( ------- * Should include runs that were modified, even if they have a loss """ - run_okay = [make_run(id=42, loss=1) for _ in range(5)] - run_modified = [make_run(id=13, loss=1, modified=True) for _ in range(5)] + run_okay = [make_run(loss=1) for _ in range(5)] + run_modified = [make_run(loss=1, modified=True) for _ in range(5)] runs = run_okay + run_modified requires_update = builder.requires_loss_update(runs) - print(run_modified) - print(requires_update) assert set(run_modified) == set(requires_update) @@ -99,8 +89,9 @@ def test_candidates_no_filters( dummy = make_run(dummy=True) runs = [make_run(loss=n) for n in range(10)] - candidates, discarded = builder.candidates( - runs + [dummy], + candidates, discarded = builder.candidate_selection( + runs, + dummy, better_than_dummy=False, nbest=None, performance_range_threshold=None, @@ -122,9 +113,9 @@ def test_candidates_filters_runs_with_no_predictions( dummy = make_run(dummy=True, loss=2) good_run = make_run(predictions="ensemble", loss=1) - runs = bad_runs + [dummy] + [good_run] + runs = bad_runs + [good_run] - candidates, discarded = builder.candidates(runs) + candidates, discarded = builder.candidate_selection(runs, dummy) assert len(candidates) == 1 assert len(discarded) == len(bad_runs) @@ -143,9 +134,9 @@ def test_candidates_filters_runs_with_no_loss( dummy_run = make_run(dummy=True, loss=2) good_run = make_run(loss=1) - runs = bad_runs + [dummy_run] + [good_run] + runs = bad_runs + [good_run] - candidates, discarded = builder.candidates(runs) + candidates, discarded = builder.candidate_selection(runs, dummy_run) assert len(candidates) == 1 assert len(discarded) == len(bad_runs) @@ -165,9 +156,11 @@ def test_candidates_filters_out_better_than_dummy( dummy_run = make_run(dummy=True, loss=0) good_runs = [make_run(loss=-1) for _ in range(3)] - runs = bad_runs + [dummy_run] + good_runs + runs = bad_runs + good_runs - candidates, discarded = builder.candidates(runs, better_than_dummy=True) + candidates, discarded = builder.candidate_selection( + runs, dummy_run, better_than_dummy=True + ) assert len(candidates) == 3 assert all(run.loss < dummy_run.loss for run in candidates) @@ -176,36 +169,6 @@ def test_candidates_filters_out_better_than_dummy( assert all(run.loss >= dummy_run.loss for run in discarded) -def test_candidates_expects_dummy( - builder: EnsembleBuilder, - make_run: Callable[..., Run], -) -> None: - """ - Expects - ------- - * Should raise if not given a dummy run - """ - runs = [make_run(dummy=False, loss=1) for _ in range(5)] - with pytest.raises(ValueError): - builder.candidates(runs) - - -def test_candidates_uses_dummy_if_no_real( - 
builder: EnsembleBuilder, - make_run: Callable[..., Run], -) -> None: - """ - Expects - ------- - * Should use dummy runs if no real candidates exist - """ - runs = [make_run(dummy=True, loss=1) for _ in range(5)] - candidates, discarded = builder.candidates(runs) - - assert len(discarded) == 0 - assert all(run.is_dummy() for run in candidates) - - def test_candidates_uses_dummy_if_no_candidates_better( builder: EnsembleBuilder, make_run: Callable[..., Run], @@ -216,12 +179,14 @@ def test_candidates_uses_dummy_if_no_candidates_better( * If no run is better than a dummy run, the candidates will then consist of the dummy runs. """ - bad_runs = [make_run(loss=10) for _ in range(10)] + runs = [make_run(loss=10) for _ in range(10)] dummies = [make_run(dummy=True, loss=0) for _ in range(2)] - runs = bad_runs + dummies - - candidates, discarded = builder.candidates(runs, better_than_dummy=True) + candidates, discarded = builder.candidate_selection( + runs, + dummies, + better_than_dummy=True, + ) assert len(candidates) == 2 assert all(run.is_dummy() for run in candidates) @@ -243,8 +208,8 @@ def test_candidates_nbest_int( expected = int(bound(nbest, bounds=(1, n))) dummy = make_run(dummy=True) - runs = [make_run(loss=i) for i in range(n)] + [dummy] - candidates, discarded = builder.candidates(runs, nbest=nbest) + runs = [make_run(loss=i) for i in range(n)] + candidates, discarded = builder.candidate_selection(runs, dummy, nbest=nbest) assert len(candidates) == expected @@ -272,8 +237,8 @@ def test_candidates_nbest_float( expected = int(bound(nbest * n, bounds=(1, n))) dummy = make_run(dummy=True, loss=0) - runs = [make_run(id=i, loss=i) for i in range(2, n + 2)] + [dummy] - candidates, discarded = builder.candidates(runs, nbest=nbest) + runs = [make_run(id=i, loss=i) for i in range(2, n + 2)] + candidates, discarded = builder.candidate_selection(runs, dummy, nbest=nbest) assert len(candidates) == expected @@ -302,14 +267,13 @@ def test_candidates_performance_range_threshold( dummy_loss = 50 boundary = threshold * best_loss + (1 - threshold) * dummy_loss - print("boundary", boundary) dummy = make_run(dummy=True, loss=dummy_loss) runs = [make_run(loss=loss) for loss in np.linspace(best_loss, worst_loss, 101)] - runs += [dummy] - candidates, discarded = builder.candidates( + candidates, discarded = builder.candidate_selection( runs, + dummy, performance_range_threshold=threshold, ) @@ -324,3 +288,24 @@ def test_candidates_performance_range_threshold( for run in discarded: assert run.loss >= boundary + + +def test_requires_deletion_does_nothing_without_params( + builder: EnsembleBuilder, + make_run: Callable[..., Run], +) -> None: + """ + Expects + ------- + * All runs should be kept + """ + runs = [make_run() for _ in range(10)] + + keep, delete = builder.requires_deletion( + runs, + max_models=None, + memory_limit=None, + ) + + assert set(runs) == set(keep) + assert len(delete) == 0 From b900fa44ca27c69d75c211eaf578c6e0506ac865 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 18 Apr 2022 14:43:06 +0200 Subject: [PATCH 068/117] Test: `max_models` for `requires_deletion` --- autosklearn/ensemble_building/builder.py | 6 ++++-- .../test_ensemble_builder.py | 21 +++++++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 3bb86b53e3..9e0960bac7 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -839,9 +839,11 @@ def requires_deletion( 
keep = sorted(runs, key=lambda r: (r.loss, r.num_run)) delete: set[Run] = set() - if max_models is not None and max_models > len(runs): + if max_models is not None and max_models < len(runs): keep, to_delete = cut(keep, max_models) - delete.update(to_delete) + + if any(to_delete): + delete.update(to_delete) if memory_limit is not None: largest = max(runs, key=lambda r: r.mem_usage) diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 32178d2cb1..399066e6df 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -309,3 +309,24 @@ def test_requires_deletion_does_nothing_without_params( assert set(runs) == set(keep) assert len(delete) == 0 + + +@parametrize("max_models", [0, 1, 2, 5]) +def test_requires_deletion_max_models( + builder: EnsembleBuilder, + max_models: int, + make_run: Callable[..., Run], +) -> None: + """ + Expects + ------- + * Should keep exactly as many models as `max_models` + * Should not have any in common between keep and delete + """ + runs = [make_run() for _ in range(10)] + keep, delete = builder.requires_deletion(runs=runs, max_models=max_models) + + assert len(keep) == max_models + assert len(delete) == len(runs) - max_models + + assert not any(set(keep) & set(delete)) From 6f37f39ac51624af5c73b706be1b65eb9ae65890 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 18 Apr 2022 14:57:28 +0200 Subject: [PATCH 069/117] Test: `memory_limit` for `requires_deletion` --- autosklearn/ensemble_building/builder.py | 2 +- .../test_ensemble_builder.py | 40 +++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 9e0960bac7..c4a166de58 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -851,7 +851,7 @@ def requires_deletion( accumulated_mem_usage = accumulate(r.mem_usage for r in runs) - cutpoint = findwhere(accumulated_mem_usage, lambda mem: mem >= cutoff) + cutpoint = findwhere(accumulated_mem_usage, lambda mem: mem > cutoff) keep, to_delete = cut(keep, cutpoint) if any(to_delete): diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 399066e6df..f767cc984a 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -2,6 +2,7 @@ from typing import Callable +import random from pathlib import Path import numpy as np @@ -330,3 +331,42 @@ def test_requires_deletion_max_models( assert len(delete) == len(runs) - max_models assert not any(set(keep) & set(delete)) + + +@parametrize("memory_limit, expected", [(0, 0), (100, 0), (200, 1), (5000, 49)]) +def test_requires_memory_limit( + builder: EnsembleBuilder, + make_run: Callable[..., Run], + memory_limit: int, + expected: int, +) -> None: + """ + Expects + ------- + * Should keep the expected amount of models + * The kept models should be sorted by lowest loss + * Should not have any models in common between keep and delete + * All models kept should be better than those deleted + """ + runs = [make_run(mem_usage=100, loss=-n) for n in range(50)] + random.shuffle(runs) + + keep, delete = builder.requires_deletion(runs=runs, memory_limit=memory_limit) + + # The cutoff for memory is (memory_limit - largest) + # E.g. 
+ # 5 models at 100 ea = 500mb usage + # largest = 100mb + # memory_limit = 400mb + # cutoff = memory_limit - largest (400mb - 100mb) = 300mb + # We can store 300mb which means the 3 best models + assert len(keep) == expected + assert len(delete) == len(runs) - expected + + assert not any(set(keep) & set(delete)) + + if len(keep) > 2: + assert all(a.loss <= b.loss for a, b in pairs(keep)) + + best_deleted = min(r.loss for r in delete) + assert not any(run.loss > best_deleted for run in keep) From ec9b946c72ef363d72474468c9a5f2711cdae0b7 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 18 Apr 2022 16:19:28 +0200 Subject: [PATCH 070/117] Test: Loss of runs --- autosklearn/ensemble_building/builder.py | 9 ++-- autosklearn/ensemble_building/run.py | 3 ++ test/fixtures/ensemble_building.py | 6 +++ .../test_ensemble_builder.py | 46 +++++++++++++++++++ 4 files changed, 60 insertions(+), 4 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index c4a166de58..2ca2057c65 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -216,9 +216,10 @@ def targets(self, kind: str = "ensemble") -> np.ndarray | None: np.ndarray | None The ensemble targets, if they can be loaded """ - if kind == "ensemble" and self._y_ensemble is None: - if os.path.exists(self.backend._get_targets_ensemble_filename()): - self._y_ensemble = self.backend.load_targets_ensemble() + if kind == "ensemble": + if self._y_ensemble is None: + if os.path.exists(self.backend._get_targets_ensemble_filename()): + self._y_ensemble = self.backend.load_targets_ensemble() return self._y_ensemble elif kind == "valid": @@ -228,7 +229,7 @@ def targets(self, kind: str = "ensemble") -> np.ndarray | None: return self._y_test else: - raise NotImplementedError() + raise NotImplementedError(kind) def run( self, diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index 3297205b45..533465bf53 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -102,6 +102,9 @@ def predictions( path = self.pred_path(kind) + if not path.exists(): + raise RuntimeError(f"No predictions for {kind}") + with path.open("rb") as f: # TODO: We should probably remove this requirement. 
I'm not sure why model # predictions are being saved as pickled diff --git a/test/fixtures/ensemble_building.py b/test/fixtures/ensemble_building.py index c0471da600..d0f8d6917a 100644 --- a/test/fixtures/ensemble_building.py +++ b/test/fixtures/ensemble_building.py @@ -123,6 +123,12 @@ def _make( ) backend.save_datamanager(datamanager) + # Annoyingly, some places use datamanger, some places use the file + # Hence, we take the y_train of the datamanager and use that as the + # the targets + if "Y_train" in datamanager.data: + backend.save_targets_ensemble(datamanager.data["Y_train"]) + builder = EnsembleBuilder( backend=backend, dataset_name=dataset_name, diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index f767cc984a..48826670f0 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -12,12 +12,24 @@ from pytest_cases import fixture, parametrize +from test.util import fails + @fixture def builder(make_ensemble_builder: Callable[..., EnsembleBuilder]) -> EnsembleBuilder: return make_ensemble_builder() +@parametrize("kind", ["ensemble", fails("valid", "Not supported anymore?"), "test"]) +def test_targets(builder: EnsembleBuilder, kind: str) -> None: + """ + Expects + ------- + * Should be able to load each of the targets + """ + assert builder.targets(kind) is not None + + def test_available_runs(builder: EnsembleBuilder) -> None: """ Expects @@ -370,3 +382,37 @@ def test_requires_memory_limit( best_deleted = min(r.loss for r in delete) assert not any(run.loss > best_deleted for run in keep) + + +@parametrize("kind", ["ensemble", "valid", "test"]) +def test_loss_with_no_ensemble_targets( + builder: EnsembleBuilder, + make_run: Callable[..., Run], + kind: str, +) -> None: + """ + Expects + ------- + * Should give a loss of np.inf if run has no predictions of a given kind + """ + run = make_run(predictions=None) + + assert builder.loss(run, kind=kind) == np.inf + + +@parametrize("kind", ["ensemble", fails("valid", "Not supported anymore?"), "test"]) +def test_loss_with_targets( + builder: EnsembleBuilder, + make_run: Callable[..., Run], + kind: str, +) -> None: + """ + Expects + ------- + * Should give a loss < np.inf if the predictions exist + """ + targets = builder.targets(kind) + + run = make_run(predictions={kind: targets}) + + assert builder.loss(run, kind) < np.inf From 88834e14858da104ead256b5f7233769c6df1c5f Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 18 Apr 2022 16:30:42 +0200 Subject: [PATCH 071/117] Test: Delete runs --- .../test_ensemble_builder.py | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 48826670f0..24d3bd3943 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -416,3 +416,43 @@ def test_loss_with_targets( run = make_run(predictions={kind: targets}) assert builder.loss(run, kind) < np.inf + + +def test_delete_runs(builder: EnsembleBuilder, make_run: Callable[..., Run]) -> None: + """ + Expects + ------- + * Should delete runs so they can not be found again by the ensemble builder + """ + runs = [make_run(backend=builder.backend) for _ in range(5)] + assert all(run.dir.exists() for run in runs) + + builder.delete_runs(runs) + assert not any(run.dir.exists() for run in runs) + + loaded = builder.available_runs() + 
assert len(loaded) == 0
+
+
+def test_delete_runs_does_not_delete_dummy(
+    builder: EnsembleBuilder,
+    make_run: Callable[..., Run],
+) -> None:
+    """
+    Expects
+    -------
+    * Should not delete the dummy runs, only the normal runs
+    """
+    backend = builder.backend
+    normal_runs = [make_run(backend=backend) for _ in range(5)]
+    dummy_runs = [make_run(dummy=True, seed=i, backend=backend) for i in range(2)]
+
+    runs = normal_runs + dummy_runs
+    assert all(run.dir.exists() for run in runs)
+
+    builder.delete_runs(runs)
+    assert not any(run.dir.exists() for run in normal_runs)
+    assert all(dummy.dir.exists() for dummy in dummy_runs)
+
+    loaded = builder.available_runs()
+    assert set(loaded.values()) == set(dummy_runs)

From e07adb636ace3f34eff5128dedcf54dcbeaaaae4 Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Tue, 19 Apr 2022 08:59:32 +0200
Subject: [PATCH 072/117] Test: `fit_ensemble` of ensemble builder

---
 .../test_ensemble_builder.py | 45 +++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py
index 24d3bd3943..110d089191 100644
--- a/test/test_ensemble_builder/test_ensemble_builder.py
+++ b/test/test_ensemble_builder/test_ensemble_builder.py
@@ -17,6 +17,7 @@

 @fixture
 def builder(make_ensemble_builder: Callable[..., EnsembleBuilder]) -> EnsembleBuilder:
+    """A default ensemble builder"""
     return make_ensemble_builder()


@@ -456,3 +457,47 @@ def test_delete_runs_does_not_delete_dummy(

     loaded = builder.available_runs()
     assert set(loaded.values()) == set(dummy_runs)
+
+
+def test_fit_ensemble_produces_ensemble(
+    builder: EnsembleBuilder,
+    make_run: Callable[..., Run],
+) -> None:
+    """
+    Expects
+    -------
+    * Should produce an ensemble if all runs have predictions
+    """
+    targets = builder.targets("ensemble")
+    assert targets is not None
+
+    predictions = targets
+    runs = [make_run(predictions={"ensemble": predictions}) for _ in range(10)]
+
+    ensemble = builder.fit_ensemble(runs, targets)
+
+    assert ensemble is not None
+
+
+def test_fit_with_error_gives_no_ensemble(
+    builder: EnsembleBuilder,
+    make_run: Callable[..., Run],
+) -> None:
+    """
+    Expects
+    -------
+    * A run without predictions will raise an error, causing the `fit_ensemble` to fail
+      and return None
+    """
+    targets = builder.targets("ensemble")
+    assert targets is not None
+
+    predictions = targets
+
+    runs = [make_run(predictions={"ensemble": predictions}) for _ in range(10)]
+    bad_run = make_run(predictions=None)
+
+    runs.append(bad_run)
+
+    ensemble = builder.fit_ensemble(runs, targets)
+    assert ensemble is None

From 2e0ccc5be3de99eaa5a3c6c0b74ccd9f552cdc9c Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Tue, 19 Apr 2022 09:03:38 +0200
Subject: [PATCH 073/117] Add test for run time parameter

---
 .../test_ensemble_builder.py | 26 +++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py
index 110d089191..8735068974 100644
--- a/test/test_ensemble_builder/test_ensemble_builder.py
+++ b/test/test_ensemble_builder/test_ensemble_builder.py
@@ -3,6 +3,7 @@
 from typing import Callable

 import random
+import time
 from pathlib import Path

 import numpy as np
@@ -11,6 +12,7 @@
 from autosklearn.util.functional import bound, pairs

 from pytest_cases import fixture, parametrize
+from unittest.mock import patch

 from test.util import fails

@@ -501,3 +503,27 @@ def test_fit_with_error_gives_no_ensemble(

     ensemble = builder.fit_ensemble(runs, targets)
     assert ensemble is None
+
+
+@parametrize("time_buffer", [1, 5])
+@parametrize("duration", [10, 20])
+def test_run_end_at(builder: EnsembleBuilder, time_buffer: int, duration: int) -> None:
+    """
+    Expects
+    -------
+    * The limits enforced by pynisher should account for the time_buffer and the
+      duration to run for, plus a little bit of overhead that gets rounded to a second.
+    """
+    with patch("pynisher.enforce_limits") as pynisher_mock:
+        builder.run(
+            end_at=time.time() + duration,
+            iteration=1,
+            time_buffer=time_buffer,
+            pynisher_context="forkserver",
+        )
+        # The 1 comes from the small overhead in conjunction with rounding down
+        expected = duration - time_buffer - 1
+        assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"] == expected

From 44fa3e8b24651d5e947de2120b81d85cea1249fb Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Tue, 19 Apr 2022 09:05:17 +0200
Subject: [PATCH 074/117] Remove parameter `return_predictions`

---
 autosklearn/ensemble_building/builder.py | 10 +---------
 autosklearn/ensemble_building/manager.py |  3 ---
 2 files changed, 1 insertion(+), 12 deletions(-)

diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py
index 2ca2057c65..f5fea12642 100644
--- a/autosklearn/ensemble_building/builder.py
+++ b/autosklearn/ensemble_building/builder.py
@@ -238,7 +238,6 @@ def run(
         time_left: float | None = None,
         end_at: float | None = None,
         time_buffer: int = 5,
-        return_predictions: bool = False,
     ) -> tuple[list[dict[str, Any]], int | float]:
         """Run the ensemble building process

@@ -262,9 +261,6 @@ def run(
            How much extra time to add as a buffer to this run. This means there is
            always some amount of time to do something useful.

-        return_predictions : bool = False
-            Whether run should also return predictions
-
         Returns
         -------
         (ensemble_history, nbest, train_preds, valid_preds, test_preds)
@@ -308,7 +304,7 @@ def run(
                 logger=self.logger,
                 context=context,
             )(self.main)
-            safe_ensemble_script(time_left, iteration, return_predictions)
+            safe_ensemble_script(time_left, iteration)
             if safe_ensemble_script.exit_status is pynisher.MemorylimitException:
                 # if ensemble script died because of memory error,
                 # reduce nbest to reduce memory consumption and try it again
@@ -361,7 +357,6 @@ def main(
         self,
         time_left: float,
         iteration: int,
-        return_predictions: bool = False,
     ) -> tuple[list[dict[str, Any]], int | float]:
         """Run the main loop of ensemble building

@@ -381,9 +376,6 @@ def main(
         iteration : int
             The iteration of this run

-        return_predictions : bool = False
-            Whether to return predictions or not
-
         Returns
         -------
         (ensemble_history: list[dict[str, Any]], nbest: int | float)
diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py
index bef0552e1f..54aa51471f 100644
--- a/autosklearn/ensemble_building/manager.py
+++ b/autosklearn/ensemble_building/manager.py
@@ -240,7 +240,6 @@ def build_ensemble(
             random_state=self.random_state,
             end_at=self.start_time + self.time_left_for_ensembles,
             iteration=self.iteration,
-            return_predictions=False,
             priority=100,
             pynisher_context=self.pynisher_context,
             logger_port=self.logger_port,
@@ -277,7 +276,6 @@ def fit_and_return_ensemble(
     read_at_most: int,
     end_at: float,
     iteration: int,
-    return_predictions: bool,
     pynisher_context: str,
     max_models_on_disc: Optional[Union[float, int]] = 100,
     logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT,
@@ -377,7 +375,6 @@ def fit_and_return_ensemble(
     ).run(
         end_at=end_at,
         iteration=iteration,
-        return_predictions=return_predictions,
         pynisher_context=pynisher_context,
     )
     return result

From 3cf4bcc28812872d9b6336be5fac1e7bbe8c6297 Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Tue, 19 Apr 2022 09:06:28 +0200
Subject: [PATCH 075/117] Add note about pickled arrays should not be supported

---
 test/test_ensemble_builder/test_run.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/test/test_ensemble_builder/test_run.py b/test/test_ensemble_builder/test_run.py
index 34c37dc0c6..1a2abfbba5 100644
--- a/test/test_ensemble_builder/test_run.py
+++ b/test/test_ensemble_builder/test_run.py
@@ -70,6 +70,10 @@ def test_predictions_pickled(make_run: Callable[..., Run]) -> None:
     Expects
     -------
     * Should be able to load pickled predictions
+
+    Note
+    ----
+    * Not sure this should be supported
     """
     run = make_run(predictions=[])
     x = np.array([0])

From 842e393c683eca51d58ebc9b6474ff5dc465e530 Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Tue, 19 Apr 2022 10:13:46 +0200
Subject: [PATCH 076/117] Make cached automl instances copy backend

---
 test/fixtures/backend.py  |  78 ++++++++++++-----
 test/fixtures/caching.py  |  54 ++----------
 test/test_automl/cases.py | 170 +++++++++++++++++++++---------------
 3 files changed, 168 insertions(+), 134 deletions(-)

diff --git a/test/fixtures/backend.py b/test/fixtures/backend.py
index cf88730f86..4770b1873e 100644
--- a/test/fixtures/backend.py
+++ b/test/fixtures/backend.py
@@ -14,6 +14,51 @@ DATAPATH = HERE.parent / "data"

+def copy_backend(old: Backend | Path | str, new: Backend | Path | str) -> Backend:
+    """Transfers a backend to a new path
+
+    Parameters
+    ----------
+    old: Backend | Path | str
+        The backend to transfer from
+
+    new: Backend | Path | str
+        Where to 
place the new backend + + Returns + ------- + Backend + The new backend with the contents of the old + """ + if isinstance(new, str): + new_backend = create( + temporary_directory=new, + output_directory=None, + prefix="auto-sklearn", + ) + elif isinstance(new, Path): + new_backend = create( + temporary_directory=str(new), + output_directory=None, + prefix="auto-sklearn", + ) + else: + new_backend = new + + dst = new_backend.temporary_directory + + if isinstance(old, str): + src = old + elif isinstance(old, Path): + src = str(old) + else: + src = old.temporary_directory + + copy_tree(src, dst) + + return new_backend + + # TODO Update to return path once everything can use a path @fixture def tmp_dir(tmp_path: Path) -> str: @@ -54,29 +99,22 @@ def _make( template: Path | Backend | None = None, ) -> Backend: if path is None: - path = tmp_path / "backend" + _path = Path(tmp_path) / "backend" + elif isinstance(path, str): + _path = Path(path) + else: + _path = path - _path = Path(path) if not isinstance(path, Path) else path - assert not _path.exists(), "Try passing path / 'backend'" - - backend = create( - temporary_directory=str(_path), - output_directory=None, - prefix="auto-sklearn", - ) + assert not _path.exists(), "Path exists, Try passing path / 'backend'" if template is not None: - dest = Path(backend.temporary_directory) - - if isinstance(template, Backend): - template = Path(template.temporary_directory) - - if isinstance(template, Path): - assert template.exists() - copy_tree(str(template), str(dest)) - - else: - raise NotImplementedError(template) + backend = copy_backend(old=template, new=_path) + else: + backend = create( + temporary_directory=str(_path), + output_directory=None, + prefix="auto-sklearn", + ) return backend diff --git a/test/fixtures/caching.py b/test/fixtures/caching.py index b6a4ed2fdc..784b489af8 100644 --- a/test/fixtures/caching.py +++ b/test/fixtures/caching.py @@ -1,15 +1,12 @@ from __future__ import annotations -from typing import Any, Callable, Optional +from typing import Any, Callable import pickle import shutil -import traceback from functools import partial from pathlib import Path -from autosklearn.automl import AutoML - from pytest import FixtureRequest from pytest_cases import fixture @@ -85,7 +82,7 @@ def path(self, name: str) -> Path: """Path to an item for this cache""" return self.dir / name - def _load(self, name: str) -> Any: + def load(self, name: str) -> Any: """Load an item from the cache with a given name""" if self.verbose: print(f"Loading cached item {self.path(name)}") @@ -93,7 +90,7 @@ def _load(self, name: str) -> Any: with self.path(name).open("rb") as f: return pickle.load(f) - def _save(self, item: Any, name: str) -> None: + def save(self, item: Any, name: str) -> None: """Dump an item to cache with a name""" if self.verbose: print(f"Saving cached item {self.path(name)}") @@ -107,52 +104,13 @@ def reset(self) -> None: self.dir.mkdir() -class AutoMLCache(Cache): - def save(self, model: AutoML) -> None: - """Save the model""" - self._save(model, "model") - - def model(self) -> Optional[AutoML]: - """Returns the saved model if it can. - - In the case of an issue loading an existing model file, it will delete - this cache item. 
- """ - if "model" not in self: - return None - - # Try to load the model, if there was an issue, delete all cached items - # for the model and return None - try: - model = self._load("model") - except Exception: - model = None - print(traceback.format_exc()) - self.reset() - finally: - return model - - def backend_path(self) -> Path: - """The path for the backend of the automl model""" - return self.path("backend") - - @fixture -def cache(request: FixtureRequest) -> Callable[[str], Cache]: +def make_cache(request: FixtureRequest) -> Callable[[str], Cache]: """Gives the access to a cache.""" pytest_cache = request.config.cache assert pytest_cache is not None - cache_dir = pytest_cache.mkdir("autosklearn-cache") - return partial(Cache, cache_dir=cache_dir) - - -@fixture -def automl_cache(request: FixtureRequest) -> Callable[[str], AutoMLCache]: - """Gives access to an automl cache""" - pytest_cache = request.config.cache - assert pytest_cache is not None - cache_dir = pytest_cache.mkdir("autosklearn-cache") verbosity = request.config.getoption("verbose") - return partial(AutoMLCache, cache_dir=cache_dir, verbose=verbosity) + + return partial(Cache, cache_dir=cache_dir, verbose=verbosity) diff --git a/test/test_automl/cases.py b/test/test_automl/cases.py index e18da7f3e8..79b7402bbf 100644 --- a/test/test_automl/cases.py +++ b/test/test_automl/cases.py @@ -16,6 +16,8 @@ {no_ensemble} - Fit with no ensemble size {cached} - If the resulting case is then cached """ +from __future__ import annotations + from typing import Callable, Tuple from pathlib import Path @@ -23,10 +25,12 @@ import numpy as np from autosklearn.automl import AutoMLClassifier, AutoMLRegressor +from autosklearn.automl_common.common.utils.backend import Backend from pytest_cases import case, parametrize -from test.fixtures.caching import AutoMLCache +from test.fixtures.backend import copy_backend +from test.fixtures.caching import Cache @case(tags=["classifier"]) @@ -57,141 +61,175 @@ def case_regressor( @case(tags=["classifier", "fitted", "holdout", "cached"]) @parametrize("dataset", ["iris"]) def case_classifier_fitted_holdout_iterative( - automl_cache: Callable[[str], AutoMLCache], dataset: str, + make_cache: Callable[[str], Cache], + make_backend: Callable[..., Backend], make_automl_classifier: Callable[..., AutoMLClassifier], make_sklearn_dataset: Callable[..., Tuple[np.ndarray, ...]], ) -> AutoMLClassifier: """Case of a holdout fitted classifier""" resampling_strategy = "holdout-iterative-fit" - cache = automl_cache(f"case_classifier_{resampling_strategy}_{dataset}") + key = f"case_classifier_{resampling_strategy}_{dataset}" + cache = make_cache(key) + + if "model" not in cache: + # Make the model in the cache + model = make_automl_classifier( + temporary_directory=cache.path("backend"), + delete_tmp_folder_after_terminate=False, + resampling_strategy=resampling_strategy, + ) - model = cache.model() - if model is not None: - return model + X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model.fit(X, y, dataset_name=dataset) - X, y, Xt, yt = make_sklearn_dataset(name=dataset) + # Save the model + cache.save(model, "model") - model = make_automl_classifier( - temporary_directory=cache.path("backend"), - delete_tmp_folder_after_terminate=False, - resampling_strategy=resampling_strategy, - ) - model.fit(X, y, dataset_name=dataset) + # Try the model from the cache + model = cache.load("model") + assert model is not None + model._backend = copy_backend(old=model._backend, new=make_backend()) - cache.save(model) return 
model @case(tags=["classifier", "fitted", "cv", "cached"]) @parametrize("dataset", ["iris"]) def case_classifier_fitted_cv( - automl_cache: Callable[[str], AutoMLCache], + make_cache: Callable[[str], Cache], dataset: str, + make_backend: Callable[..., Backend], make_automl_classifier: Callable[..., AutoMLClassifier], make_sklearn_dataset: Callable[..., Tuple[np.ndarray, ...]], ) -> AutoMLClassifier: """Case of a fitted cv AutoMLClassifier""" resampling_strategy = "cv" - cache = automl_cache(f"case_classifier_{resampling_strategy}_{dataset}") - model = cache.model() - if model is not None: - return model + key = f"case_classifier_{resampling_strategy}_{dataset}" + cache = make_cache(key) + + if "model" not in cache: + model = make_automl_classifier( + resampling_strategy=resampling_strategy, + temporary_directory=cache.path("backend"), + delete_tmp_folder_after_terminate=False, + ) + + X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model.fit(X, y, dataset_name=dataset) - X, y, Xt, yt = make_sklearn_dataset(name=dataset) - model = make_automl_classifier( - resampling_strategy=resampling_strategy, - temporary_directory=cache.path("backend"), - delete_tmp_folder_after_terminate=False, - ) - model.fit(X, y, dataset_name=dataset) + cache.save(model, "model") + + # Try the model from the cache + model = cache.load("model") + assert model is not None + model._backend = copy_backend(old=model._backend, new=make_backend()) - cache.save(model) return model @case(tags=["regressor", "fitted", "holdout", "cached"]) @parametrize("dataset", ["boston"]) def case_regressor_fitted_holdout( - automl_cache: Callable[[str], AutoMLCache], + make_cache: Callable[[str], Cache], dataset: str, + make_backend: Callable[..., Backend], make_automl_regressor: Callable[..., AutoMLRegressor], make_sklearn_dataset: Callable[..., Tuple[np.ndarray, ...]], ) -> AutoMLRegressor: """Case of fitted regressor with cv resampling""" resampling_strategy = "holdout" - cache = automl_cache(f"case_regressor_{resampling_strategy}_{dataset}") - model = cache.model() - if model is not None: - return model + key = f"case_regressor_{resampling_strategy}_{dataset}" + cache = make_cache(key) + + if "model" not in cache: + model = make_automl_regressor( + temporary_directory=cache.path("backend"), + resampling_strategy=resampling_strategy, + delete_tmp_folder_after_terminate=False, + ) + + X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model.fit(X, y, dataset_name=dataset) - X, y, Xt, yt = make_sklearn_dataset(name=dataset) - model = make_automl_regressor( - resampling_strategy=resampling_strategy, - temporary_directory=cache.path("backend"), - delete_tmp_folder_after_terminate=False, - ) - model.fit(X, y, dataset_name=dataset) + cache.save(model, "model") + + # Try the model from the cache + model = cache.load("model") + assert model is not None + + model._backend = copy_backend(old=model._backend, new=make_backend()) - cache.save(model) return model @case(tags=["regressor", "fitted", "cv", "cached"]) @parametrize("dataset", ["boston"]) def case_regressor_fitted_cv( - automl_cache: Callable[[str], AutoMLCache], + make_cache: Callable[[str], Cache], dataset: str, + make_backend: Callable[..., Backend], make_automl_regressor: Callable[..., AutoMLRegressor], make_sklearn_dataset: Callable[..., Tuple[np.ndarray, ...]], ) -> AutoMLRegressor: """Case of fitted regressor with cv resampling""" resampling_strategy = "cv" - cache = automl_cache(f"case_regressor_{resampling_strategy}_{dataset}") - model = cache.model() - if model is not None: 
- return model + key = f"case_regressor_{resampling_strategy}_{dataset}" + cache = make_cache(key) + + if "model" not in cache: + model = make_automl_regressor( + temporary_directory=cache.path("backend"), + resampling_strategy=resampling_strategy, + delete_tmp_folder_after_terminate=False, + ) + + X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model.fit(X, y, dataset_name=dataset) - X, y, Xt, yt = make_sklearn_dataset(name=dataset) + cache.save(model, "model") - model = make_automl_regressor( - temporary_directory=cache.path("backend"), - delete_tmp_folder_after_terminate=False, - resampling_strategy=resampling_strategy, - ) - model.fit(X, y, dataset_name=dataset) + # Try the model from the cache + model = cache.load("model") + assert model is not None + + model._backend = copy_backend(old=model._backend, new=make_backend()) - cache.save(model) return model @case(tags=["classifier", "fitted", "no_ensemble", "cached"]) @parametrize("dataset", ["iris"]) def case_classifier_fitted_no_ensemble( - automl_cache: Callable[[str], AutoMLCache], + make_cache: Callable[[str], Cache], dataset: str, + make_backend: Callable[..., Backend], make_automl_classifier: Callable[..., AutoMLClassifier], make_sklearn_dataset: Callable[..., Tuple[np.ndarray, ...]], ) -> AutoMLClassifier: """Case of a fitted classifier but enemble_size was set to 0""" - cache = automl_cache(f"case_classifier_fitted_no_ensemble_{dataset}") + key = f"case_classifier_fitted_no_ensemble_{dataset}" + cache = make_cache(key) + + if "model" not in cache: + model = make_automl_classifier( + temporary_directory=cache.path("backend"), + delete_tmp_folder_after_terminate=False, + ensemble_size=0, + ) + + X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model.fit(X, y, dataset_name=dataset) - model = cache.model() - if model is not None: - return model + cache.save(model, "model") - X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model = cache.load("model") + assert model is not None - model = make_automl_classifier( - temporary_directory=cache.path("backend"), - delete_tmp_folder_after_terminate=False, - ensemble_size=0, - ) - model.fit(X, y, dataset_name=dataset) + model._backend = copy_backend(old=model._backend, new=make_backend()) - cache.save(model) return model From 3083628b97a67b250bc50951264eb2b5222b9b4a Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 10:39:38 +0200 Subject: [PATCH 077/117] Add valid static method to run --- autosklearn/ensemble_building/run.py | 18 ++++++++++++++++++ test/test_ensemble_builder/test_run.py | 21 +++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index 533465bf53..3afbe70e69 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -3,6 +3,7 @@ from typing import Tuple from pathlib import Path +import re import numpy as np @@ -14,6 +15,8 @@ class Run: """Class for storing information about a run used during ensemble building""" + re_model_dir = r'^([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*)$' + def __init__(self, path: Path) -> None: """Creates a Run from a path point to the directory of a run @@ -141,3 +144,18 @@ def __repr__(self) -> str: def __eq__(self, other: object) -> bool: return isinstance(other, Run) and other.id == self.id + + @staticmethod + def valid(path: Path) -> bool: + """ + Parameters + ---------- + path: Path + The path to check + + Returns + ------- + bool + Whether the path is a valid run dir + """ + return 
re.match(Run.re_model_dir, path.name) is not None diff --git a/test/test_ensemble_builder/test_run.py b/test/test_ensemble_builder/test_run.py index 1a2abfbba5..e3227d67ff 100644 --- a/test/test_ensemble_builder/test_run.py +++ b/test/test_ensemble_builder/test_run.py @@ -4,6 +4,7 @@ import pickle import time +from pathlib import Path import numpy as np @@ -150,3 +151,23 @@ def test_equality(make_run: Callable[..., Run]) -> None: assert r1 != r3 assert r2 != r3 + + +@parametrize( + "name, expected", + [ + ("0_0_0.0", True), + ("1_152_64.24", True), + ("123412_3462_100.0", True), + ("tmp_sf12198", False), + ("tmp_0_0_0.0", False), + ], +) +def test_valid(name: str, expected: bool) -> None: + """ + Expects + ------- + * Should be able to correctly consider valid run dir names + """ + path = Path(name) + assert Run.valid(path) == expected From 195ed706b441038ef886ec2c2969037c1e36e7e4 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 10:39:59 +0200 Subject: [PATCH 078/117] Remove old test data --- .../test_3_models/__init__.py | 0 .../test_3_models/cases.py | 104 ---- .../predictions_ensemble_true.npy | Bin 160 -> 0 bytes .../.auto-sklearn/runs/0_1_0.0/0.1.0.0.model | 0 .../0_1_0.0/predictions_ensemble_0_1_0.0.npy | Bin 160 -> 0 bytes .../runs/0_1_0.0/predictions_test_0_1_0.0.npy | Bin 160 -> 0 bytes .../0_1_0.0/predictions_valid_0_1_0.0.npy | Bin 160 -> 0 bytes .../.auto-sklearn/runs/0_2_0.0/0.2.0.0.model | 0 .../0_2_0.0/predictions_ensemble_0_2_0.0.npy | Bin 160 -> 0 bytes .../runs/0_2_0.0/predictions_test_0_2_0.0.np | Bin 160 -> 0 bytes .../runs/0_2_0.0/predictions_test_0_2_0.0.npy | Bin 160 -> 0 bytes .../0_2_0.0/predictions_valid_0_2_0.0.npy | Bin 160 -> 0 bytes .../runs/0_3_100.0/0.3.0.0.model | 0 .../runs/0_3_100.0/0.3.100.0.model | 0 .../predictions_ensemble_0_3_100.0.npy | Bin 160 -> 0 bytes .../0_3_100.0/predictions_test_0_3_100.0.npy | Bin 160 -> 0 bytes .../0_3_100.0/predictions_valid_0_3_100.0.npy | Bin 160 -> 0 bytes .../.auto-sklearn/true_targets_ensemble.npy | Bin 160 -> 0 bytes .../test_3_models/test_3_models.py | 506 ------------------ .../test_ensemble_builder_mock_data.py | 373 ------------- 20 files changed, 983 deletions(-) delete mode 100644 test/test_ensemble_builder/test_3_models/__init__.py delete mode 100644 test/test_ensemble_builder/test_3_models/cases.py delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/predictions_ensemble_true.npy delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_test_0_1_0.0.npy delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_valid_0_1_0.0.npy delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.npy delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_valid_0_2_0.0.npy delete mode 100644 
test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_test_0_3_100.0.npy delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_valid_0_3_100.0.npy delete mode 100644 test/test_ensemble_builder/test_3_models/data/.auto-sklearn/true_targets_ensemble.npy delete mode 100644 test/test_ensemble_builder/test_3_models/test_3_models.py delete mode 100644 test/test_ensemble_builder/test_ensemble_builder_mock_data.py diff --git a/test/test_ensemble_builder/test_3_models/__init__.py b/test/test_ensemble_builder/test_3_models/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/test_ensemble_builder/test_3_models/cases.py b/test/test_ensemble_builder/test_3_models/cases.py deleted file mode 100644 index 3a103e1059..0000000000 --- a/test/test_ensemble_builder/test_3_models/cases.py +++ /dev/null @@ -1,104 +0,0 @@ -"""See the contents of TOY_DATA for full details - -/data -└── .auto-sklearn - ├── runs - │ ├── 0_1_0.0 - │ │ ├── 0.1.0.0.model - │ │ ├── predictions_ensemble_0_1_0.0.npy - │ │ ├── predictions_test_0_1_0.0.npy - │ │ └── predictions_valid_0_1_0.0.npy - │ ├── 0_2_0.0 - │ │ ├── 0.2.0.0.model - │ │ ├── predictions_ensemble_0_2_0.0.npy - │ │ ├── predictions_test_0_2_0.0.np - │ │ ├── predictions_test_0_2_0.0.npy - │ │ └── predictions_valid_0_2_0.0.npy - │ └── 0_3_100.0 - │ ├── 0.3.0.0.model - │ ├── 0.3.100.0.model - │ ├── predictions_ensemble_0_3_100.0.npy - │ ├── predictions_test_0_3_100.0.npy - │ └── predictions_valid_0_3_100.0.npy - ├── datamanager.pkl - ├── true_targets_ensemble.npy - └── predictions_ensemble_true.npy - -# Ensemble targets and predictions -Both `predictions_ensemble_targets` and `true_targets_ensemble` are the same set of data -* [ [1, 0], [0, 1], [0, 1], [0, 1], [0, 1], ] - -# 0_1_0.0 -All of run 0_1_0.0's predictions for "ensemble" "test" and "valid" are differing by -their predictions in the first key. -* [ [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], ] - -# 0_2_0.0, 0_3_100.0 -All of run 0_2_0.0's predictions for "ensemble" "test" and "valid" are exactly the same -as the `true_targets_ensemble` and `predictions_ensemble_true` -* [ [1, 0], [0, 1], [0, 1], [0, 1], [0, 1], ] - -# Models -The models are empty files. - -# Datamanager -The datamanager contains the iris dataset as the above numbers are made up with no -real corresponding models so the data from the datamanager can not be faked so easily. - -# Extra Notes -The extra `predictions_test_0_2_0.0.np` are required to make `test_max_models_on_disc` -pass as it factors into the memory estimation. Should probably fix that. 
-""" - -from typing import Callable - -import pickle -from pathlib import Path - -import numpy as np - -from autosklearn.automl_common.common.utils.backend import Backend -from autosklearn.constants import BINARY_CLASSIFICATION -from autosklearn.data.xy_data_manager import XYDataManager - -from pytest_cases import case - -HERE = Path(__file__).parent.resolve() -DATADIR = HERE / "data" - - -@case -def case_3_models( - tmp_path: Path, - make_backend: Callable[..., Backend], - make_sklearn_dataset: Callable[..., XYDataManager], -) -> Backend: - """Gives the backend for the this certain setup""" - path = tmp_path / "backend" - - # Create the datamanager that was used if needed - dm_path = DATADIR / ".auto-sklearn" / "datamanager.pkl" - - if not dm_path.exists(): - datamanager = make_sklearn_dataset( - name="breast_cancer", - task=BINARY_CLASSIFICATION, - feat_type="numerical", # They're all numerical - as_datamanager=True, - ) - - # For some reason, the old mock was just returning this array as: - # - # datamanger.data.get.return_value = array - # - model_3_path = DATADIR / ".auto-sklearn" / "runs" / "0_3_100.0" - test_preds = model_3_path / "predictions_test_0_3_100.0.npy" - array = np.load(test_preds) - - datamanager.data["Y_valid"] = array - datamanager.data["Y_test"] = array - - with dm_path.open("wb") as f: - pickle.dump(datamanager, f) - - return make_backend(path=path, template=DATADIR) diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/predictions_ensemble_true.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/predictions_ensemble_true.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/0.1.0.0.model deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_ensemble_0_1_0.0.npy deleted file mode 100644 index 1b2320113d4ffe309dff0f30b4adb5c434b84d52..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= eXCxM+0{I%IItoUbItsN4aKOa?1&lBTg?s>2!WBvY diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_test_0_1_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_test_0_1_0.0.npy deleted file mode 100644 index 1b2320113d4ffe309dff0f30b4adb5c434b84d52..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= eXCxM+0{I%IItoUbItsN4aKOa?1&lBTg?s>2!WBvY diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_valid_0_1_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_1_0.0/predictions_valid_0_1_0.0.npy deleted file mode 100644 index 1b2320113d4ffe309dff0f30b4adb5c434b84d52..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 
zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= eXCxM+0{I%IItoUbItsN4aKOa?1&lBTg?s>2!WBvY diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/0.2.0.0.model deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.np deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_test_0_2_0.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_valid_0_2_0.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_2_0.0/predictions_valid_0_2_0.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.0.0.model deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/0.3.100.0.model deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_ensemble_0_3_100.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_test_0_3_100.0.npy 
b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_test_0_3_100.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_valid_0_3_100.0.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/runs/0_3_100.0/predictions_valid_0_3_100.0.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/true_targets_ensemble.npy b/test/test_ensemble_builder/test_3_models/data/.auto-sklearn/true_targets_ensemble.npy deleted file mode 100644 index fee3160c86d8995cb5ece8126aae88f13a964629..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 160 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= fXCxM+0{I%IItoUbItsN4aKOdLfE}QV6UzqxRmv4g diff --git a/test/test_ensemble_builder/test_3_models/test_3_models.py b/test/test_ensemble_builder/test_3_models/test_3_models.py deleted file mode 100644 index fe20f13b36..0000000000 --- a/test/test_ensemble_builder/test_3_models/test_3_models.py +++ /dev/null @@ -1,506 +0,0 @@ -from __future__ import annotations - -import os -from pathlib import Path - -import numpy as np - -from autosklearn.automl_common.common.utils.backend import Backend -from autosklearn.constants import BINARY_CLASSIFICATION -from autosklearn.ensemble_building.builder import Y_TEST, Y_VALID, EnsembleBuilder -from autosklearn.metrics import roc_auc - -from pytest_cases import parametrize, parametrize_with_cases -from unittest.mock import Mock, patch - -import test.test_ensemble_builder.test_3_models.cases as cases -from test.conftest import DEFAULT_SEED -from test.fixtures.logging import MockLogger - - -@parametrize_with_cases("ensemble_backend", cases=cases) -def test_read(ensemble_backend: Backend) -> None: - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ) - - targets = ensbuilder.y_ensemble - assert targets is not None - - ensbuilder.compute_loss_per_model(targets) - - assert len(ensbuilder.run_predictions) == 3, ensbuilder.run_predictions.keys() - assert len(ensbuilder.runs) == 3, ensbuilder.runs - - runsdir = Path(ensemble_backend.get_runs_directory()) - preds_1 = runsdir / "0_1_0.0" / "predictions_ensemble_0_1_0.0.npy" - preds_2 = runsdir / "0_2_0.0" / "predictions_ensemble_0_2_0.0.npy" - preds_3 = runsdir / "0_3_100.0" / "predictions_ensemble_0_3_100.0.npy" - - assert ensbuilder.runs[str(preds_1)].loss == 0.5 - assert ensbuilder.runs[str(preds_2)].loss == 0.0 - assert ensbuilder.runs[str(preds_3)].loss == 0.0 - - -@parametrize( - "ensemble_nbest, max_models_on_disc, expected", - ( - (1, None, 1), - (1.0, None, 2), - (0.1, None, 1), - (0.9, None, 1), - (1, 2, 1), - (2, 1, 1), - ), -) -@parametrize_with_cases("ensemble_backend", cases=cases) -def test_nbest( - ensemble_backend: Backend, - ensemble_nbest: int | float, - max_models_on_disc: int | 
None, - expected: int, -) -> None: - """ - Parameters - ---------- - ensemble_backend: Backend - The backend to use. In this case, we specifically rely on the `setup_3_models` - setup. - - ensemble_nbest: int | float - The parameter to use for consider the n best, int being absolute and float being - fraction. - - max_models_on_disc: int | None - The maximum amount of models to keep on disk - - expected: int - The number of keys expected to be selected - - Expects - ------- - * get_n_best_preds should contain 2 keys - * The first key should be model 0_2_0_0 - """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=ensemble_nbest, - max_models_on_disc=max_models_on_disc, - ) - - targets = ensbuilder.y_ensemble - assert targets is not None - - ensbuilder.compute_loss_per_model(targets) - sel_keys = ensbuilder.get_n_best_preds() - - assert len(sel_keys) == expected - - expected_sel = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", - ) - assert sel_keys[0] == expected_sel - - -@parametrize( - "max_models_on_disc, expected", - [ - # If None, no reduction - (None, 2), - # If Int, limit only on exceed - (4, 2), - (1, 1), - # If Float, translate float to # models. - # We mock so sizeof will return 500MB, this means that 500MB is required per run - # and we also need the 500MB extra as slack room. This means we can't fit 2 - # models in 1499MB but we can in 1500MB. We also don't include the dummy - # model which explains why even with 9999MB, we still only have 2 - (1499.0, 1), - (1500.0, 2), - (9999.0, 2), - ], -) -@parametrize_with_cases("ensemble_backend", cases=cases) -def test_max_models_on_disc( - ensemble_backend: Backend, - max_models_on_disc: int | float, - expected: int, -) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - The backend to use, relies on setup_3_models - - max_models_on_disc : int | float - The max_models_on_disc param to use - - expected : int - The expected number of selected models - - Expects - ------- - * The number of selected models should be as expected - """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=0, # important to find the test files - ensemble_nbest=4, - max_models_on_disc=max_models_on_disc, - ) - - with patch("autosklearn.ensemble_building.builder.sizeof") as mock: - mock.return_value = 500 - - targets = ensbuilder.y_ensemble - assert targets is not None - - ensbuilder.compute_loss_per_model(targets) - - sel_keys = ensbuilder.get_n_best_preds() - assert mock.called - print(mock.call_args_list) - assert len(sel_keys) == expected - - -@parametrize_with_cases("ensemble_backend", cases=cases) -def test_fall_back_nbest(ensemble_backend: Backend) -> None: - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=1, - ) - - targets = ensbuilder.y_ensemble - assert targets is not None - - ensbuilder.compute_loss_per_model(targets) - - for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"]: - filename = os.path.join( - ensemble_backend.temporary_directory, - f".auto-sklearn/runs/{model}/predictions_ensemble_{model}.npy", - ) - ensbuilder.runs[filename].loss = -1 - - sel_keys = 
ensbuilder.get_n_best_preds() - - best_model = "0_1_0.0" - expected = os.path.join( - ensemble_backend.temporary_directory, - f".auto-sklearn/runs/{best_model}/predictions_ensemble_{best_model}.npy", - ) - - assert len(sel_keys) == 1 - assert sel_keys[0] == expected - - -@parametrize_with_cases("ensemble_backend", cases=cases) -def test_get_valid_test_preds(ensemble_backend: Backend) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - The ensemble backend to use with the setup_3_models setup - """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=1, - ) - - # There are 3 models in the setup - # * Run 1 is the dummy run - # * Run 2 and Run 3 share the same predictions - # -> Run 2 is selected with ensemble_nbest = 1 - paths = [ - os.path.join( - ensemble_backend.temporary_directory, - f".auto-sklearn/runs/{model}/predictions_ensemble_{model}.npy", - ) - for model in ["0_1_0.0", "0_2_0.0", "0_3_100.0"] - ] - - targets = ensbuilder.y_ensemble - assert targets is not None - - ensbuilder.compute_loss_per_model(targets) - - sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) == 1 - - ensbuilder.get_valid_test_preds(selected_keys=sel_keys) - - # Number of read files should be three and contain those of the models in the setup - assert set(ensbuilder.run_predictions.keys()) == set(paths) - - selected = sel_keys - non_selected = set(paths) - set(sel_keys) - - # not selected --> should still be None - for key in non_selected: - assert ensbuilder.run_predictions[key][Y_VALID] is None - assert ensbuilder.run_predictions[key][Y_TEST] is None - - # selected --> read valid and test predictions - for key in selected: - assert ensbuilder.run_predictions[key][Y_VALID] is not None - assert ensbuilder.run_predictions[key][Y_TEST] is not None - - -@parametrize_with_cases("ensemble_backend", cases=cases) -def test_ensemble_builder_predictions(ensemble_backend: Backend) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - The ensemble backend to use with the setup_3_models setup - - Expects - ------- - * The validation and test sets should both have equal predictions for them? 
- * Since model 0_2_0.0 has predictions exactly equal to the targets, it should - recieve full weight and that the predictions should be identical to that models - predictions - """ - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=2, - ) - - targets = ensbuilder.y_ensemble - assert targets is not None - - ensbuilder.compute_loss_per_model(targets) - - d2 = os.path.join( - ensemble_backend.temporary_directory, - ".auto-sklearn/runs/0_2_0.0/predictions_ensemble_0_2_0.0.npy", - ) - - sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) > 0 - - ensemble = ensbuilder.fit_ensemble(selected_keys=sel_keys) - - assert ensemble is not None - print(ensemble, sel_keys) - - n_sel_valid, n_sel_test = ensbuilder.get_valid_test_preds(selected_keys=sel_keys) - - # both valid and test prediction files are available - assert len(n_sel_valid) > 0 - assert n_sel_valid == n_sel_test - - y_valid = ensbuilder.predict( - set_="valid", - ensemble=ensemble, - selected_keys=n_sel_valid, - n_preds=len(sel_keys), - index_run=1, - ) - y_test = ensbuilder.predict( - set_="test", - ensemble=ensemble, - selected_keys=n_sel_test, - n_preds=len(sel_keys), - index_run=1, - ) - - # predictions for valid and test are the same - # --> should results in the same predictions - assert y_valid is not None - assert y_test is not None - np.testing.assert_array_almost_equal(y_valid, y_test) - - # since d2 provides perfect predictions - # it should get a higher weight - # so that y_valid should be exactly y_valid_d2 - y_valid_d2 = ensbuilder.run_predictions[d2][Y_VALID][:, 1] - np.testing.assert_array_almost_equal(y_valid, y_valid_d2) - - -@parametrize_with_cases("ensemble_backend", cases=cases) -def test_main(ensemble_backend: Backend) -> None: - """ - Parameters - ---------- - ensemble_backend : Backend - The ensemble_backend to use, this test relies on this specific case - - Expects - ------- - * There should be "run_predictions" and "runs" saved to file - * There should be 3 model reads - * There should be a hash for the preds read in - * The true targets should have been read in - * The length of the history returned by run should be the same as the iterations - performed. - * The run history should contain "optimization", "val" and "test" scores, each being - the same at 1.0 due to the setup of "setup_3_models". 
- """ - iters = 1 - - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ) - - run_history, ensemble_nbest, _, _, _ = ensbuilder.main( - time_left=np.inf, - iteration=iters, - return_predictions=False, - ) - - internals_dir = Path(ensemble_backend.internals_directory) - - assert ensbuilder.run_predictions_path.exists(), list(internals_dir.iterdir()) - assert ensbuilder.runs_path.exists(), list(internals_dir.iterdir()) - - # There should be three preds read - assert len(ensbuilder.run_predictions) == 3 - assert ensbuilder.last_hash is not None - assert ensbuilder.y_ensemble is not None - - # We expect as many iterations as the iters param - assert len(run_history) == iters - hist_item = run_history[0] - - # As the data loader loads the same val/train/test - # we expect 1.0 as score and all keys available - expected_performance = { - "ensemble_val_score": 1.0, - "ensemble_test_score": 1.0, - "ensemble_optimization_score": 1.0, - } - - assert all(key in hist_item for key in expected_performance) - assert all(hist_item[key] == score for key, score in expected_performance.items()) - assert "Timestamp" in hist_item - - -@parametrize_with_cases("ensemble_backend", cases=cases) -def test_limit( - ensemble_backend: Backend, - mock_logger: MockLogger, -) -> None: - """ - - Parameters - ---------- - ensemble_backend : Backend - The backend setup to use - - Fixtures - -------- - mock_logger: MockLogger - A logger to inject into the EnsembleBuilder for tracking calls - - Expects - ------- - * Running from (ensemble_nbest, read_at_most) = (10, 5) where a memory exception - occurs in each run, we expect ensemble_nbest to be halved continuously until - it reaches 0, at which point read_at_most is reduced directly to 1. 
- """ - expected_states = [(10, 5), (5, 5), (2, 5), (1, 5), (0, 1)] - - starting_state = expected_states[0] - intermediate_states = expected_states[1:-1] - final_state = expected_states[-1] - - starting_nbest, starting_read_at_most = starting_state - - ensbuilder = EnsembleBuilder( - backend=ensemble_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - ensemble_nbest=starting_nbest, - read_at_most=starting_read_at_most, - memory_limit=1, - ) - - # Force a memory error to occur - ensbuilder.predict = Mock(side_effect=MemoryError) # type: ignore - ensbuilder.logger = mock_logger # Mock its logger - - def mtime_mock(filename: str) -> float: - """TODO, not really sure why we have to force these""" - path = Path(filename) - mtimes = { - # At second 0 - "predictions_ensemble_0_1_0.0.npy": 0.0, - "predictions_valid_0_1_0.0.npy": 0.1, - "predictions_test_0_1_0.0.npy": 0.2, - # At second 1 - "predictions_ensemble_0_2_0.0.npy": 1.0, - "predictions_valid_0_2_0.0.npy": 1.1, - "predictions_test_0_2_0.0.npy": 1.2, - # At second 2 - "predictions_ensemble_0_3_100.0.npy": 2.0, - "predictions_valid_0_3_100.0.npy": 2.1, - "predictions_test_0_3_100.0.npy": 2.2, - } - return mtimes[path.name] - - with patch("os.path.getmtime") as mtime: - mtime.side_effect = mtime_mock - - starting_state = (starting_nbest, starting_read_at_most) - assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == starting_state - - intermediate_states = [(5, 5), (2, 5), (1, 5), (0, 1)] - for i, exp_state in enumerate(intermediate_states, start=1): - ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - - assert ensbuilder.runs_path.exists() - assert not ensbuilder.run_predictions_path.exists() - - assert mock_logger.warning.call_count == i # type: ignore - - assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == exp_state - - # At this point, when we've reached (ensemble_nbest, read_at_most) = (0, 1), - # we can still run the ensbulder but it should just raise an error and not - # change it's internal state - ensbuilder.run(time_left=1000, iteration=0, pynisher_context="fork") - - assert ensbuilder.runs_path.exists() - assert not ensbuilder.run_predictions_path.exists() - - assert (ensbuilder.ensemble_nbest, ensbuilder.read_at_most) == final_state - - warning_call_count = mock_logger.warning.call_count # type: ignore - error_call_count = mock_logger.error.call_count # type: ignore - - assert warning_call_count == len(intermediate_states) - assert error_call_count == 1 - - for call_arg in mock_logger.error.call_args_list: # type: ignore - assert "Memory Exception -- Unable to further reduce" in str(call_arg) diff --git a/test/test_ensemble_builder/test_ensemble_builder_mock_data.py b/test/test_ensemble_builder/test_ensemble_builder_mock_data.py deleted file mode 100644 index 18d0643b05..0000000000 --- a/test/test_ensemble_builder/test_ensemble_builder_mock_data.py +++ /dev/null @@ -1,373 +0,0 @@ -from __future__ import annotations - -from typing import Callable - -import time -from pathlib import Path - -import numpy as np - -from autosklearn.automl_common.common.utils.backend import Backend -from autosklearn.constants import BINARY_CLASSIFICATION -from autosklearn.data.xy_data_manager import XYDataManager -from autosklearn.ensemble_building import EnsembleBuilder, Run -from autosklearn.metrics import roc_auc - -from pytest_cases import fixture, parametrize -from unittest.mock import patch - -from test.conftest import 
DEFAULT_SEED - - -def test_available_runs(make_ensemble_builder: Callable[..., EnsembleBuilder]) -> None: - builder = make_ensemble_builder() - runsdir = Path(builder.backend.get_runs_directory()) - - ids = {(0, i, 0.0) for i in range(1, 10)} - paths = [runsdir / f"{s}_{n}_{b}" for s, n, b in ids] - - for path in paths: - path.mkdir() - - available_runs = builder.available_runs() - - for run_id in available_runs.keys(): - assert run_id in ids - - -@parametrize("n_models", [20, 50]) -@parametrize("mem_model", [1, 10, 100, 1000]) -@parametrize("mem_largest_mult", [1, 2, 10]) -@parametrize("n_expected", [1, 3, 5, 10]) -@parametrize("largest_is_best", [True, False]) -def test_candidates_memory_limit( - n_models: int, - mem_model: int, - mem_largest_mult: int, - n_expected: int, - largest_is_best: bool, - backend: Backend, - make_ensemble_builder: Callable[..., EnsembleBuilder], - make_run: Callable[..., Run], -) -> None: - """ - Parameters - ---------- - n_models : int - The number of models to have - - mem_model : int - The memory consumption per model - - mem_largest_mutl : int - How much the largest model takes (mem_largest = mem_per_model * mult) - - n_expected : int - How many models we expect the EnsembleBuilder to save - - largest_is_best: bool - Whether to include the largest models as one of the best models or as the worst. - - Fixtures - -------- - make_ensemble_builder: Callable[..., EnsembleBuilder] - make_run: Callable[..., Run] - - Note - ---- - We use the parameters here to calculate the `max_models_on_disc` arg to verify - that with that calculate, we do indeed selected that many models. - - mem_nbest = ... memory of the n best models - max_models_on_disc = float(mem_nbest + mem_largest_model) - - This is a bit backwards to calculate max_models_on_disc from what we expect but - it is much easier and still verifies behaviour. - - Expects - ------- - * The ensemble builder should select the expected number of models given the - calculated `max_models_on_disc`. - """ - runs = [ - make_run(id=n, loss=10 * n, mem_usage=mem_model, backend=backend) - for n in range(1, n_models + 1) - ] - - mem_largest = mem_model * mem_largest_mult - if largest_is_best: - runs[-1]._mem_usage = mem_largest - else: - runs[0]._mem_usage = mem_largest - - nbest = sorted(runs, key=lambda run: run.loss)[:n_expected] - mem_for_nbest = sum(run.mem_usage for run in nbest) - model_memory_limit = float(mem_for_nbest + mem_largest) # type: ignore - - builder = make_ensemble_builder( - max_models_on_disc=model_memory_limit, - backend=backend, - ) - - candidates, discarded = builder.candidates( - runs, - model_memory_limit=model_memory_limit, - ) - - # We expect to save the first n runs as those are the ones with thel lowest loss - expected = runs[:n_expected] - - assert expected == candidates - assert set(runs) - set(candidates) == set(discarded) - - -@parametrize("n_models", [50, 10, 2, 1]) -def test_max_models_on_disc_float_always_preserves_best_model( - n_models: int, - dummy_backend: Backend, -) -> None: - """ - Parameters - ---------- - n_models : int - The number of models to start with - - Fixtures - -------- - dummy_backend: Backend - Just a valid backend, contents don't matter for this test - - Expects - ------- - * The best model should always be selected even if the memory assigned for models - on disc does not allow for any models. This is because we need at least one. 
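To make the budget arithmetic in `test_candidates_memory_limit` above concrete, here is one parameter combination worked through by hand (the numbers are illustrative, not the only ones parametrized):

.. code:: python

    # n_models runs at mem_model MB each, one of them inflated by mem_largest_mult
    mem_model = 10                  # MB per ordinary run
    mem_largest = 10 * 2            # mem_largest_mult = 2
    n_expected = 3                  # runs the builder should keep

    mem_for_nbest = n_expected * mem_model                   # 30 MB for the n best
    model_memory_limit = float(mem_for_nbest + mem_largest)  # 50.0 MB budget

    # Passed as max_models_on_disc, this float budget should leave exactly
    # the 3 lowest-loss runs as candidates.
    assert model_memory_limit == 50.0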
- """ - max_models_on_disc = 0.0 - - ensbuilder = EnsembleBuilder( - backend=dummy_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, # important to find the test files - max_models_on_disc=max_models_on_disc, - memory_limit=None, - ) - - runs = [ - Run( - seed=DEFAULT_SEED, - num_run=n + 1, - budget=0.0, - loss=10 * -n, - loaded=1, - mem_usage=50 * n, - ens_file=f"pred{n+1}", - ) - for n in range(n_models) - ] - best_model = min(runs, key=lambda run: run.loss) - - ensbuilder._runs = {run.ens_file: run for run in runs} - ensbuilder._run_predictions = { - f"pred{n}": {Y_ENSEMBLE: np.array([1])} for n in range(1, n_models + 1) - } - - sel_keys = ensbuilder.get_n_best_preds() - assert [best_model.ens_file] == sel_keys - - -@parametrize( - "performance_range_threshold, expected_selected", - ((0.0, 4), (0.1, 4), (0.3, 3), (0.5, 2), (0.6, 2), (0.8, 1), (1.0, 1)), -) -def test_performance_range_threshold( - performance_range_threshold: float, - expected_selected: int, - dummy_backend: Backend, -) -> None: - """ - Parameters - ---------- - performance_range_threshold : float - THe performance range threshold to use - - expected_selected : int - The number of selected models for there to be - - Fixtures - -------- - dummy_backend: Backend - A valid backend whose contents don't matter for this test - - Expects - ------- - * Expects the given amount of models to be selected given a performance range - threshold. - """ - ensbuilder = EnsembleBuilder( - backend=dummy_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, - performance_range_threshold=performance_range_threshold, - ) - - ensbuilder._runs = { - "A": Run(seed=DEFAULT_SEED, num_run=1, loss=-1, loaded=-1, ens_file=""), - "B": Run(seed=DEFAULT_SEED, num_run=2, loss=-2, loaded=-1, ens_file=""), - "C": Run(seed=DEFAULT_SEED, num_run=3, loss=-3, loaded=-1, ens_file=""), - "D": Run(seed=DEFAULT_SEED, num_run=4, loss=-4, loaded=-1, ens_file=""), - "E": Run(seed=DEFAULT_SEED, num_run=5, loss=-5, loaded=-1, ens_file=""), - } - ensbuilder._run_predictions = { - name: {preds_key: np.array([1]) for preds_key in (Y_ENSEMBLE, Y_VALID, Y_TEST)} - for name in ensbuilder._runs - } - - sel_keys = ensbuilder.get_n_best_preds() - assert len(sel_keys) == expected_selected - - -@parametrize( - "performance_range_threshold, ensemble_nbest, expected_selected", - ( - (0.0, 1, 1), - (0.0, 1.0, 4), - (0.1, 2, 2), - (0.3, 4, 3), - (0.5, 1, 1), - (0.6, 10, 2), - (0.8, 0.5, 1), - (1, 1.0, 1), - ), -) -def test_performance_range_threshold_with_ensemble_nbest( - performance_range_threshold: float, - ensemble_nbest: int | float, - expected_selected: int, - dummy_backend: Backend, -) -> None: - """ - Parameters - ---------- - performance_range_threshold : float - ensemble_nbest : int | float - expected_selected : int - The number of models expected to be selected - - Fixtures - -------- - dummy_backend: Backend - A backend whose contents are valid and don't matter for this test - - Expects - ------- - * Given the setup of params for test_performance_range_threshold and ensemble_nbest, - the expected number of models should be selected. 
- """ - ensbuilder = EnsembleBuilder( - backend=dummy_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - seed=DEFAULT_SEED, - ensemble_nbest=ensemble_nbest, - performance_range_threshold=performance_range_threshold, - max_models_on_disc=None, - ) - ensbuilder._runs = { - "A": Run(seed=DEFAULT_SEED, num_run=1, loss=-1, loaded=-1, ens_file=""), - "B": Run(seed=DEFAULT_SEED, num_run=2, loss=-2, loaded=-1, ens_file=""), - "C": Run(seed=DEFAULT_SEED, num_run=3, loss=-3, loaded=-1, ens_file=""), - "D": Run(seed=DEFAULT_SEED, num_run=4, loss=-4, loaded=-1, ens_file=""), - "E": Run(seed=DEFAULT_SEED, num_run=5, loss=-5, loaded=-1, ens_file=""), - } - ensbuilder._run_predictions = { - name: {pred_name: np.array([1]) for pred_name in (Y_ENSEMBLE, Y_VALID, Y_TEST)} - for name in ensbuilder._runs - } - sel_keys = ensbuilder.get_n_best_preds() - - assert len(sel_keys) == expected_selected - - -@parametrize("time_buffer", [1, 5]) -@parametrize("duration", [10, 20]) -def test_run_end_at(dummy_backend: Backend, time_buffer: int, duration: int) -> None: - """ - Parameters - ---------- - time_buffer: int - How much time buffer to give to the ensemble builder - - duration: int - How long to run the ensemble builder for - - Fixtures - -------- - dummy_backend: Backend - A valid backend whose contents don't matter for this test - - Expects - ------- - * The limits enforced by pynisher should account for the time_buffer and duration - to run for + a little bit of overhead that gets rounded to a second. - """ - with patch("pynisher.enforce_limits") as pynisher_mock: - ensbuilder = EnsembleBuilder( - backend=dummy_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - ) - - ensbuilder.run( - end_at=time.time() + duration, - iteration=1, - time_buffer=time_buffer, - pynisher_context="forkserver", - ) - - # The 1 comes from the small overhead in conjuction with rounding down - expected = duration - time_buffer - 1 - assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"] == expected - - -def test_can_load_pickled_ndarray_of_dtype_object(dummy_backend: Backend) -> None: - """ - Fixture - ------- - dummy_backend: Backend - A backend with a datamanger so it will load - - Expects - ------- - * EnsembleBuilder should be able to load np.ndarray's that were saved as a pickled - object, which happens when the np.ndarray's are of dtype object. - - """ - # TODO Should probably remove this test - # - # I'm not sure why the predictions are stored as pickled objects sometimes - # but that's a security vunerability to users using auto-sklearn. 
- # - ensbuilder = EnsembleBuilder( - backend=dummy_backend, - dataset_name="TEST", - task_type=BINARY_CLASSIFICATION, - metric=roc_auc, - ) - - # By specifiyng dtype object, we force it into saving as a pickle - x = np.array([1, 2, 3, 4], dtype=object) - - path = Path(dummy_backend.internals_directory) / "test.npy" - with path.open("wb") as f: - # This is the default value (allow_pickle=True) but we explicitly state it - np.save(f, x, allow_pickle=True) - - loaded_x = ensbuilder._predictions_from(path) - - np.testing.assert_equal(x, loaded_x) From 86d298aae284b658b426d28357feeb12c7d178d6 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 10:41:34 +0200 Subject: [PATCH 079/117] Add filter for bad run dirs --- autosklearn/ensemble_building/builder.py | 2 +- .../test_ensemble_builder.py | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index f5fea12642..ba534df7ca 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -202,7 +202,7 @@ def available_runs(self) -> dict[RunID, Run]: A dictionary from RunId's to the available runs """ runs_dir = Path(self.backend.get_runs_directory()) - runs = [Run(path=dir) for dir in runs_dir.iterdir()] + runs = iter(Run(path=dir) for dir in runs_dir.iterdir() if Run.valid(dir)) return {run.id: run for run in runs} def targets(self, kind: str = "ensemble") -> np.ndarray | None: diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 8735068974..f27ad5e9da 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -55,6 +55,25 @@ def test_available_runs(builder: EnsembleBuilder) -> None: assert run_id in ids +def test_available_runs_with_bad_dir_contained(builder: EnsembleBuilder) -> None: + """ + Expects + ------- + * Should ignore dirs that aren't in format + """ + runsdir = Path(builder.backend.get_runs_directory()) + + ids = {(0, i, 0.0) for i in range(1, 10)} + paths = [runsdir / f"{s}_{n}_{b}" for s, n, b in ids] + + bad_path = runsdir / "Im_a_bad_path" + + for path in paths + [bad_path]: + path.mkdir() + + available_runs = builder.available_runs() + assert len(available_runs) == len(paths) + def test_requires_loss_update_with_modified_runs( builder: EnsembleBuilder, make_run: Callable[..., Run], From 1c1828ed9d098f3e79e62b2813ae79b05ff516bf Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 10:50:08 +0200 Subject: [PATCH 080/117] Made `main` args optional --- autosklearn/ensemble_building/builder.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index ba534df7ca..77d32d7d51 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -355,8 +355,8 @@ def run( def main( self, - time_left: float, - iteration: int, + time_left: float | None = None, + iteration: int | None = None, ) -> tuple[list[dict[str, Any]], int | float]: """Run the main loop of ensemble building @@ -387,11 +387,12 @@ def main( port=self.logger_port, ) - self.start_time = time.time() - - used_time = time.time() - self.start_time - left_for_iter = time_left - used_time - self.logger.debug(f"Starting iteration {iteration}, time left: {left_for_iter}") + if time_left is not None: + self.start_time = 
time.time() + used_time = time.time() - self.start_time + left_for_iter = time_left - used_time + itr = iteration if str(iteration) is not None else "" + self.logger.debug(f"Starting iteration {itr}, time left: {left_for_iter}") # Can't load data, exit early if not os.path.exists(self.backend._get_targets_ensemble_filename()): From ddace9d4ee8218f4006214ace98bc152de8232c7 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 16:41:02 +0200 Subject: [PATCH 081/117] Fix check for updated runs --- autosklearn/ensemble_building/builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 77d32d7d51..51f55fcf1a 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -513,7 +513,7 @@ def main( updated_candidates = iter(run in candidates for run in requires_update) - if not any(difference) or any(updated_candidates): + if not any(difference) and not any(updated_candidates): self.logger.info("All ensemble candidates the same, no update required") return self.ensemble_history, self.ensemble_nbest From 8a393ea51a7f87ef100942ec2fc70e81c44286b5 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 17:09:38 +0200 Subject: [PATCH 082/117] Make `main` raise errors --- autosklearn/ensemble_building/builder.py | 136 +++++++++++------------ 1 file changed, 67 insertions(+), 69 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 51f55fcf1a..6ca2cd9dcd 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -304,8 +304,11 @@ def run( logger=self.logger, context=context, )(self.main) + safe_ensemble_script(time_left, iteration) - if safe_ensemble_script.exit_status is pynisher.MemorylimitException: + + status = safe_ensemble_script.exit_status + if isinstance(status, pynisher.MemorylimitException): # if ensemble script died because of memory error, # reduce nbest to reduce memory consumption and try it again @@ -348,6 +351,8 @@ def run( "less ensemble_nbest: %d" % self.ensemble_nbest ) return [], self.ensemble_nbest + elif isinstance(status, pynisher.AnythingException): + return ([], self.ensemble_nbest) else: return safe_ensemble_script.result @@ -397,7 +402,7 @@ def main( # Can't load data, exit early if not os.path.exists(self.backend._get_targets_ensemble_filename()): self.logger.debug(f"No targets for ensemble: {traceback.format_exc()}") - return self.ensemble_history, self.ensemble_nbest + raise RuntimeError("No targets for ensemble") # Load in information from previous candidates and also runs available_runs = self.available_runs() @@ -411,7 +416,7 @@ def main( if len(runs) == 0: self.logger.debug("Found no runs") - return self.ensemble_history, self.ensemble_nbest + raise RuntimeError("Found no runs") # Calculate the loss for those that require it requires_update = self.requires_loss_update(runs) @@ -466,7 +471,7 @@ def main( intersect = valid_subset & test_subset if len(intersect) == 0 and len(test_subset) > 0 and len(valid_subset) > 0: self.logger.error("valid_set and test_set not empty but do not overlap") - return self.ensemble_history, self.ensemble_nbest + raise RuntimeError("valid_set and test_set not empty but do not overlap") # Try to use the runs which have the most kinds of preds, otherwise just use all if len(intersect) > 0: @@ -518,6 +523,7 @@ def main( return self.ensemble_history, self.ensemble_nbest 
targets = cast(np.ndarray, self.targets("ensemble")) # Sure they exist + ensemble = self.fit_ensemble( candidates, targets=targets, @@ -528,53 +534,51 @@ def main( random_state=self.random_state, ) - if ensemble is not None: - self.logger.info(str(ensemble)) - ens_perf = ensemble.get_validation_performance() - self.validation_performance_ = min(self.validation_performance_, ens_perf) - self.backend.save_ensemble(ensemble, iteration, self.seed) # type: ignore + self.logger.info(str(ensemble)) + ens_perf = ensemble.get_validation_performance() + self.validation_performance_ = min(self.validation_performance_, ens_perf) + self.backend.save_ensemble(ensemble, iteration, self.seed) # type: ignore # Continue with evaluating the ensemble after making some space - if ensemble is not None: - performance_stamp = {"Timestamp": pd.Timestamp.now()} - - for kind, score_name, models in [ - ("ensemble", "optimization", candidates), - ("valid", "val", valid_models), - ("test", "test", test_models), - ]: - if len(models) == 0: - continue - - pred_targets = self.targets(kind) - if pred_targets is None: - self.logger.warning(f"No ensemble targets for {kind}") - continue - - run_preds = [ - r.predictions(kind, precision=self.precision) for r in models - ] - pred = ensemble.predict(run_preds) - - # Pretty sure this whole step is uneeded but left over and afraid - # to touch - if self.task_type == BINARY_CLASSIFICATION: - pred = pred[:, 1] - - if pred.ndim == 1 or pred.shape[1] == 1: - pred = np.vstack( - ((1 - pred).reshape((1, -1)), pred.reshape((1, -1))) - ).transpose() - - score = calculate_score( - solution=pred_targets, - prediction=pred, - task_type=self.task_type, - metric=self.metric, - scoring_functions=None, - ) - performance_stamp[f"ensemble_{score_name}_score"] = score - self.ensemble_history.append(performance_stamp) + performance_stamp = {"Timestamp": pd.Timestamp.now()} + + for kind, score_name, models in [ + ("ensemble", "optimization", candidates), + ("valid", "val", valid_models), + ("test", "test", test_models), + ]: + if len(models) == 0: + continue + + pred_targets = self.targets(kind) + if pred_targets is None: + self.logger.warning(f"No ensemble targets for {kind}") + continue + + run_preds = [ + r.predictions(kind, precision=self.precision) for r in models + ] + pred = ensemble.predict(run_preds) + + # Pretty sure this whole step is uneeded but left over and afraid + # to touch + if self.task_type == BINARY_CLASSIFICATION: + pred = pred[:, 1] + + if pred.ndim == 1 or pred.shape[1] == 1: + pred = np.vstack( + ((1 - pred).reshape((1, -1)), pred.reshape((1, -1))) + ).transpose() + + score = calculate_score( + solution=pred_targets, + prediction=pred, + task_type=self.task_type, + metric=self.metric, + scoring_functions=None, + ) + performance_stamp[f"ensemble_{score_name}_score"] = score + self.ensemble_history.append(performance_stamp) return self.ensemble_history, self.ensemble_nbest @@ -750,7 +754,7 @@ def fit_ensemble( metric: Scorer | None = None, precision: int | None = None, random_state: int | np.random.RandomState | None = None, - ) -> EnsembleSelection | None: + ) -> EnsembleSelection: """TODO Parameters @@ -768,8 +772,6 @@ def fit_ensemble( metric = metric if metric is not None else self.metric rs = random_state if random_state is not None else self.random_state - ensemble: EnsembleSelection | None - ensemble = EnsembleSelection( ensemble_size=size, task_type=task, @@ -780,24 +782,20 @@ def fit_ensemble( self.logger.debug(f"Fitting ensemble on {len(runs)} models") start_time = 
time.time() - try: - precision = precision if precision is not None else self.precision - predictions_train = [ - run.predictions("ensemble", precision=precision) for run in runs - ] + precision = precision if precision is not None else self.precision + predictions_train = [ + run.predictions("ensemble", precision=precision) for run in runs + ] - ensemble.fit( - predictions=predictions_train, - labels=targets, - identifiers=[run.id for run in runs], - ) - except Exception as e: - self.logger.error(f"Caught error {e}: {traceback.format_exc()}") - ensemble = None - finally: - duration = time.time() - start_time - self.logger.debug(f"Fitting the ensemble took {duration} seconds.") - return ensemble + ensemble.fit( + predictions=predictions_train, + labels=targets, + identifiers=[run.id for run in runs], + ) + + duration = time.time() - start_time + self.logger.debug(f"Fitting the ensemble took {duration} seconds.") + return ensemble def requires_deletion( self, From c0ed290dfe9518560616285ee5d9735f4a957b10 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 17:09:57 +0200 Subject: [PATCH 083/117] Fix default value for ensemble builder `main` --- autosklearn/ensemble_building/builder.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 6ca2cd9dcd..98b6955632 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -361,7 +361,7 @@ def run( def main( self, time_left: float | None = None, - iteration: int | None = None, + iteration: int = 0, ) -> tuple[list[dict[str, Any]], int | float]: """Run the main loop of ensemble building @@ -375,10 +375,10 @@ def main( Parameters ---------- - time_left : float + time_left : float | None = None How much time is left for this run - iteration : int + iteration : int = 0 The iteration of this run Returns From 94f869dcfa03ca368bc8f7c5403c3893eaa1dac8 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 18:06:46 +0200 Subject: [PATCH 084/117] Test valid ensemble with real runs --- autosklearn/util/functional.py | 21 ++-- .../test_ensemble_builder.py | 6 + .../test_ensemble_builder_real.py | 108 +++++++++--------- 3 files changed, 74 insertions(+), 61 deletions(-) diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index 1e1a94e207..59604331be 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -109,16 +109,21 @@ def cut( if isinstance(where, int): lst = list(itr) return lst[:where], lst[where:] - else: - a = [] - itr2 = iter(itr) - for x in itr2: - if not where(x): - a.append(x) - else: - break + a = [] + itr2 = iter(itr) + broke = False + for x in itr2: + if not where(x): + a.append(x) + else: + broke = True + break + + if broke: return a, [x] + list(itr2) + else: + return a, [] def split( diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index f27ad5e9da..5c41253d5d 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -118,6 +118,7 @@ def test_candidates_no_filters( """ Expects ------- + * Should have nothing in common between candidates and discarded * Should not filter out any viable runs if no filters set. 
Here a viable run has a loss and ensemble predictions """ @@ -132,6 +133,7 @@ def test_candidates_no_filters( performance_range_threshold=None, ) + assert len(set(candidates) & discarded) == 0 assert len(candidates) == len(runs) assert len(discarded) == 0 @@ -142,6 +144,7 @@ def test_candidates_filters_runs_with_no_predictions( """ Expects ------- + * Should have nothing in common between candidates and discarded * Should filter out runs with no "ensemble" predictions """ bad_runs = [make_run(predictions=None) for _ in range(5)] @@ -152,6 +155,7 @@ def test_candidates_filters_runs_with_no_predictions( candidates, discarded = builder.candidate_selection(runs, dummy) + assert len(set(candidates) & discarded) == 0 assert len(candidates) == 1 assert len(discarded) == len(bad_runs) assert candidates[0].pred_path("ensemble").exists() @@ -197,6 +201,8 @@ def test_candidates_filters_out_better_than_dummy( runs, dummy_run, better_than_dummy=True ) + assert set(candidates) + assert len(candidates) == 3 assert all(run.loss < dummy_run.loss for run in candidates) diff --git a/test/test_ensemble_builder/test_ensemble_builder_real.py b/test/test_ensemble_builder/test_ensemble_builder_real.py index 6788a22b12..040599f5ce 100644 --- a/test/test_ensemble_builder/test_ensemble_builder_real.py +++ b/test/test_ensemble_builder/test_ensemble_builder_real.py @@ -2,79 +2,81 @@ from typing import Callable -from pathlib import Path -from shutil import rmtree - from autosklearn.automl import AutoML -from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.ensemble_building.builder import EnsembleBuilder from pytest_cases import parametrize_with_cases +from unittest.mock import MagicMock, patch import test.test_automl.cases as cases +from test.conftest import DEFAULT_SEED @parametrize_with_cases("automl", cases=cases, has_tag="fitted") -def case_ensemble_builder_with_real_runs( - tmp_path: Path, +def case_real_runs( automl: AutoML, - make_backend: Callable[..., Backend], + make_ensemble_builder: Callable[..., EnsembleBuilder], ) -> EnsembleBuilder: - """Gives the backend for from the cached automl instance in `test_automl/cases.py` - - We do this by copying the backend produced from these cached automl runs to a new - tmp directory for the ensemble builder tests to run from - - We also delete ensemble building specific things so that ensemble sees them as - just a collection of runs and no previous ensemble building has been done. 
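The reworked `cut` helper in `autosklearn/util/functional.py` above now also handles the case where the predicate never fires. Its expected behaviour, assuming the `where=` keyword seen in the implementation, looks roughly like this:

.. code:: python

    from autosklearn.util.functional import cut

    # Split at an index
    assert cut([1, 2, 3, 4], where=2) == ([1, 2], [3, 4])

    # Split at the first element matching a predicate
    assert cut([1, 2, 3, 4], where=lambda x: x >= 3) == ([1, 2], [3, 4])

    # Predicate never fires: everything stays on the left, nothing is dropped
    assert cut([1, 2, 3, 4], where=lambda x: x >= 10) == ([1, 2, 3, 4], [])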
- """ - original_backend = automl._backend - backend_path = tmp_path / "backend" - - backend = make_backend(path=backend_path, template=original_backend) - assert backend.internals_directory != original_backend.internals_directory - - ensemble_dir = Path(backend.get_ensemble_dir()) - if ensemble_dir.exists(): - rmtree(ensemble_dir) - - ensemble_hist = Path(backend.internals_directory) / "ensemble_history.json" - if ensemble_hist.exists(): - ensemble_hist.unlink() - - # This is extra information required to build the ensemble builder exactly - # as was created by the AutoML object - builder = EnsembleBuilder( - backend=backend, - dataset_name=automl._dataset_name, # type: ignore is not None - task_type=automl._task, # type: ignore is not None - metric=automl._metric, # type: ignore is not None + """Uses real runs from a fitted automl instance""" + builder = make_ensemble_builder( + backend=automl._backend, + metric=automl._metric, + task_type=automl._task, + dataset_name=automl._dataset_name, seed=automl._seed, - max_models_on_disc=automl._max_models_on_disc, - memory_limit=automl._memory_limit, + logger_port=automl._logger_port, + random_state=DEFAULT_SEED, ) return builder -@parametrize_with_cases("builder", cases=case_ensemble_builder_with_real_runs) -def test_outputs(builder: EnsembleBuilder) -> None: +@parametrize_with_cases("builder", cases=case_real_runs) +def test_run_builds_valid_ensemble(builder: EnsembleBuilder) -> None: """ - Fixtures - -------- - builder: EnsembleBuilder - An EnsembleBuilder created from the contents of a real autosklearn AutoML run - Expects ------- - * Should generate cached items "ensemble_read_preds" and ensemble_read_losses" - * Should generate an ensembles directory which contains at least one ensemble + * The history returned should not be empty + * The generated ensemble should not be empty + * If any deleted, should be no overlap with those deleted and ensemble + * If any deleted, they should all be worse than those in the ensemble """ - builder.main(time_left=10, iteration=0) + # So we can capture the saved ensemble + mock_save = MagicMock() + builder.backend.save_ensemble = mock_save # type: ignore + + # So we can capture what was deleted + mock_delete = MagicMock() + builder.delete_runs = mock_delete # type: ignore + + # So we can capture the candidate runs used, we still wrap the actual fitting + with patch.object(builder, "fit_ensemble", wraps=builder.fit_ensemble) as mock_fit: + history, nbest = builder.main() + + assert history is not None + + ens, _, _ = mock_save.call_args[0] + assert len(ens.get_selected_model_identifiers()) > 0 + + ens_ids = set(ens.get_selected_model_identifiers()) + deleted = mock_delete.call_args[0][0] + + # If we deleted runs, we better make sure they're worse than what's + # in the ensemble + if len(deleted) > 0: + deleted_ids = {run.id for run in deleted} + assert len(ens_ids & deleted_ids) == 0 + + ensemble_candidates = mock_fit.call_args[0][0] + + best_deleted = min(deleted, key=lambda r: (r.loss, r.num_run)) + worst_candidate = max(ensemble_candidates, key=lambda r: (r.loss, r.num_run)) - for path in [builder.run_predictions_path, builder.runs_path]: - assert path.exists(), f"contents = {list(dir.iterdir())}" + a = (worst_candidate.loss, worst_candidate.num_run) + b = (best_deleted.loss, best_deleted.num_run) + assert a <= b - ens_dir = Path(builder.backend.get_ensemble_dir()) - assert ens_dir.exists() - assert len(list(ens_dir.iterdir())) > 0 +@parametrize_with_cases("builder", cases=case_real_runs) +def 
test_main(builder: EnsembleBuilder) -> None: + result = builder.run(1, time_left=10) + raise ValueError(x) From 4725ba65e53f3211209aaa1a6c86375950dd03f7 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 21:53:25 +0200 Subject: [PATCH 085/117] Rename parameter for manager --- autosklearn/automl.py | 4 ++-- autosklearn/ensemble_building/manager.py | 13 ++++++++----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/autosklearn/automl.py b/autosklearn/automl.py index c400284849..6b701f546f 100644 --- a/autosklearn/automl.py +++ b/autosklearn/automl.py @@ -812,7 +812,7 @@ def fit( precision=self.precision, max_iterations=None, read_at_most=np.inf, - ensemble_memory_limit=self._memory_limit, + memory_limit=self._memory_limit, random_state=self._seed, logger_port=self._logger_port, pynisher_context=self._multiprocessing_context, @@ -1520,7 +1520,7 @@ def fit_ensemble( precision=precision if precision else self.precision, max_iterations=1, read_at_most=np.inf, - ensemble_memory_limit=self._memory_limit, + memory_limit=self._memory_limit, random_state=self._seed, logger_port=self._logger_port, pynisher_context=self._multiprocessing_context, diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index 54aa51471f..c9d901fe37 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -28,13 +28,13 @@ def __init__( dataset_name: str, task: int, metric: Scorer, - ensemble_size: int, - ensemble_nbest: int, + ensemble_size: int = 10, + ensemble_nbest: int | float = 100, seed: int, - precision: int, + precision: int = 32, max_iterations: Optional[int], read_at_most: int, - ensemble_memory_limit: Optional[int], + memory_limit: Optional[int], random_state: Union[int, np.random.RandomState], max_models_on_disc: Optional[float | int] = 100, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, @@ -111,7 +111,10 @@ def __init__( The multiprocessing context for pynisher. One of spawn/fork/forkserver. 
""" + # TODO delete + # Not used, overwritten later self.start_time = start_time + self.time_left_for_ensembles = time_left_for_ensembles self.backend = backend self.dataset_name = dataset_name @@ -235,7 +238,7 @@ def build_ensemble( max_models_on_disc=self.max_models_on_disc, seed=self.seed, precision=self.precision, - memory_limit=self.ensemble_memory_limit, + memory_limit=self.memory_limit, read_at_most=self.read_at_most, random_state=self.random_state, end_at=self.start_time + self.time_left_for_ensembles, From 3af17e1068a4a4b7cbf1b1c7005d361ebe1e1449 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 22:03:31 +0200 Subject: [PATCH 086/117] Add defaults and reorder parameters for EnsembleBuilderManager --- autosklearn/ensemble_building/manager.py | 89 ++++++++++++------------ 1 file changed, 44 insertions(+), 45 deletions(-) diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index c9d901fe37..2f1e468311 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -22,58 +22,60 @@ class EnsembleBuilderManager(IncorporateRunResultCallback): def __init__( self, - start_time: float, - time_left_for_ensembles: float, backend: Backend, dataset_name: str, task: int, metric: Scorer, + time_left_for_ensembles: float = 10, + max_iterations: int | None = None, + pynisher_context: str = "fork", ensemble_size: int = 10, ensemble_nbest: int | float = 100, - seed: int, + max_models_on_disc: int | float | None = None, + seed: int = 1, precision: int = 32, - max_iterations: Optional[int], - read_at_most: int, - memory_limit: Optional[int], - random_state: Union[int, np.random.RandomState], - max_models_on_disc: Optional[float | int] = 100, + memory_limit: int | None = None, + read_at_most: int | None = 5, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, - pynisher_context: str = "fork", + random_state: int | np.random.RandomState | None = None, + start_time: float | None = None, ): """SMAC callback to handle ensemble building Parameters ---------- - start_time: int - the time when this job was started, to account for any latency in job - allocation. - - time_left_for_ensemble: int - How much time is left for the task. Job should finish within this - allocated time - - backend: util.backend.Backend + backend: Backend backend to write and read files dataset_name: str name of dataset - task_type: int - type of ML task + task: int + Type of ML task - metric: str - name of metric to compute the loss of the given predictions + metric: Scorer + Metric to compute the loss of the given predictions - ensemble_size: int + time_left_for_ensemble: float = 10 + How much time is left for the task in seconds. + Job should finish within this allocated time + + max_iterations: int | None = None + maximal number of iterations to run this script. None indicates no limit + on iterations. + + pynisher_context: "spawn" | "fork" | "forkserver" = "fork" + The multiprocessing context for pynisher. 
+ + ensemble_size: int = 10 maximal size of ensemble - ensemble_nbest: int/float - if int: consider only the n best prediction - if float: consider only this fraction of the best models - Both wrt to validation predictions + ensemble_nbest: int | float = 100 + If int: consider only the n best prediction + If float: consider only this fraction of the best models If performance_range_threshold > 0, might return less models - max_models_on_disc: Optional[int | float] = 100 + max_models_on_disc: int | float | None = None Defines the maximum number of models that are kept in the disc. If int, it must be greater or equal than 1, and dictates the max @@ -88,28 +90,25 @@ def __init__( If None, the feature is disabled. It defines an upper bound on the models that can be used in the ensemble. - seed: int - random seed + seed: int = 1 + Seed used for the inidividual runs - max_iterations: int - maximal number of iterations to run this script - (default None --> deactivated) + precision: 16 | 32 | 64 | 128 = 32 + Precision of floats to read the predictions - precision: [16,32,64,128] - precision of floats to read the predictions + memory_limit: int | None = None + Memory limit in mb. If ``None``, no memory limit is enforced. - ensemble_memory_limit: Optional[int] - memory limit in mb. If ``None``, no memory limit is enforced. + read_at_most: int = 5 + Read at most n new prediction files in each iteration - read_at_most: int - read at most n new prediction files in each iteration - - logger_port: int - port that receives logging records - - pynisher_context: str - The multiprocessing context for pynisher. One of spawn/fork/forkserver. + logger_port: int = DEFAULT_TCP_LOGGING_PORT + Port that receives logging records + start_time: float | None = None + DISABLED: Just using time.time() to set it + The time when this job was started, to account for any latency in job + allocation. 
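With these defaults in place, only the backend, dataset name, task and metric are strictly required to construct the manager. A minimal construction sketch, assuming `backend` is an existing, populated `Backend` instance:

.. code:: python

    from autosklearn.constants import BINARY_CLASSIFICATION
    from autosklearn.ensemble_building import EnsembleBuilderManager
    from autosklearn.metrics import roc_auc

    manager = EnsembleBuilderManager(
        backend=backend,              # assumed to exist and hold targets/runs
        dataset_name="TEST",
        task=BINARY_CLASSIFICATION,
        metric=roc_auc,
        time_left_for_ensembles=60,   # seconds left for ensembling overall
        ensemble_nbest=50,
        memory_limit=3072,            # MB per ensemble-building job
    )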
""" # TODO delete # Not used, overwritten later From fa55d15d7e4519f17a5b4428e1795dd99ce75cf1 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 22:09:42 +0200 Subject: [PATCH 087/117] Fixup parameters in `fit_and_return_ensemble` --- autosklearn/ensemble_building/manager.py | 108 +++++++++++------------ 1 file changed, 53 insertions(+), 55 deletions(-) diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index 2f1e468311..7856b49b8c 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -267,22 +267,22 @@ def build_ensemble( def fit_and_return_ensemble( + iteration: int, + end_at: float, backend: Backend, dataset_name: str, task_type: int, metric: Scorer, - ensemble_size: int, - ensemble_nbest: int, - seed: int, - precision: int, - read_at_most: int, - end_at: float, - iteration: int, pynisher_context: str, - max_models_on_disc: Optional[Union[float, int]] = 100, + ensemble_size: int = 10, + ensemble_nbest: int | float = 100, + max_models_on_disc: int | float | None = None, + seed: int = 1, + precision: int = 32, + memory_limit: int | None = None, + read_at_most: int | None = 5, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, - memory_limit: Optional[int] = None, - random_state: Optional[Union[int, np.random.RandomState]] = None, + random_state: int | np.random.RandomState | None = None, ) -> tuple[list[dict[str, Any]], int | float]: """ @@ -291,69 +291,67 @@ def fit_and_return_ensemble( Parameters ---------- - backend: util.backend.Backend - backend to write and read files + iteration: int + The current iteration - dataset_name: str - name of dataset + end_at: float + At what time the job must finish. Needs to be the endtime and not the + time left because we do not know when dask schedules the job. - metric: str - name of metric to compute the loss of the given predictions + backend: Backend + Backend to write and read files - task_type: int - type of ML task + dataset_name: str + name of dataset - ensemble_size: int - maximal size of ensemble (passed to autosklearn.ensemble.ensemble_selection) + task_type: int + type of ML task - ensemble_nbest: int/float - if int: consider only the n best prediction - if float: consider only this fraction of the best models - Both wrt to validation predictions - If performance_range_threshold > 0, might return less models + metric: Scorer + Metric to compute the loss of the given predictions - max_models_on_disc: Optional[int | float] = 100 - Defines the maximum number of models that are kept in the disc. + pynisher_context: "fork" | "spawn" | "forkserver" = "fork" + Context to use for multiprocessing, can be either fork, spawn or forkserver. - If int, it must be greater or equal than 1, and dictates the max number of - models to keep. + ensemble_size: int = 10 + Maximal size of ensemble - If float, it will be interpreted as the max megabytes allowed of disc space. - That is, if the number of ensemble candidates require more disc space than - this float value, the worst models will be deleted to keep within this - budget. Models and predictions of the worst-performing models will be - deleted then. + ensemble_nbest: int | float = 1000 + If int: consider only the n best prediction + If float: consider only this fraction of the best models + If performance_range_threshold > 0, might return less models - If None, the feature is disabled. - It defines an upper bound on the models that can be used in the ensemble. 
+ max_models_on_disc: int | float | None = 100 + Defines the maximum number of models that are kept in the disc. - seed: int - random seed + If int, it must be greater or equal than 1, and dictates the max number of + models to keep. - precision: [16,32,64,128] - precision of floats to read the predictions + If float, it will be interpreted as the max megabytes allowed of disc space. + That is, if the number of ensemble candidates require more disc space than + this float value, the worst models will be deleted to keep within this + budget. Models and predictions of the worst-performing models will be + deleted then. - read_at_most: int - read at most n new prediction files in each iteration + If None, the feature is disabled. - end_at: float - At what time the job must finish. Needs to be the endtime and not the - time left because we do not know when dask schedules the job. + seed: int = 1 + Seed used for training the models in the backend - iteration: int - The current iteration + precision: 16 | 32 | 64 | 128 = 32 + Precision of floats to read the predictions - pynisher_context: str - Context to use for multiprocessing, can be either fork, spawn or forkserver. + memory_limit: int | None = None + Memory limit in mb. If ``None``, no memory limit is enforced. - logger_port: int = DEFAULT_TCP_LOGGING_PORT - The port where the logging server is listening to. + read_at_most: int = 5 + Read at most n new prediction files in each iteration - memory_limit: Optional[int] = None - memory limit in mb. If ``None``, no memory limit is enforced. + logger_port: int = DEFAULT_TCP_LOGGING_PORT + The port where the logging server is listening to. - random_state: Optional[int | RandomState] = None - A random state used for the ensemble selection process. + random_state: int | RandomState | None = None + A random state used for the ensemble selection process. Returns ------- From 2115f0ce15eb3fa13491cc743aa808f471edff32 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 22:10:58 +0200 Subject: [PATCH 088/117] Typing fixes --- autosklearn/ensemble_building/manager.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index 7856b49b8c..0aabbafafd 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Optional, Union +from typing import Any import logging.handlers import time @@ -110,10 +110,6 @@ def __init__( The time when this job was started, to account for any latency in job allocation. 
""" - # TODO delete - # Not used, overwritten later - self.start_time = start_time - self.time_left_for_ensembles = time_left_for_ensembles self.backend = backend self.dataset_name = dataset_name @@ -126,7 +122,7 @@ def __init__( self.precision = precision self.max_iterations = max_iterations self.read_at_most = read_at_most - self.ensemble_memory_limit = ensemble_memory_limit + self.memory_limit = memory_limit self.random_state = random_state self.logger_port = logger_port self.pynisher_context = pynisher_context From 050d9a4446b493c3f1c0c63be5f19ef33fcb787d Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 22:13:44 +0200 Subject: [PATCH 089/117] Make `fit_and_return_ensemble` a staticmethod --- autosklearn/ensemble_building/manager.py | 225 +++++++++++------------ 1 file changed, 112 insertions(+), 113 deletions(-) diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index 0aabbafafd..958ecb548c 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -223,7 +223,7 @@ def build_ensemble( # wait for the below function to be done self.futures.append( dask_client.submit( - fit_and_return_ensemble, + EnsembleBuilderManager.fit_and_return_ensemble, backend=self.backend, dataset_name=self.dataset_name, task_type=self.task, @@ -261,116 +261,115 @@ def build_ensemble( logger.critical(exception_traceback) logger.critical(error_message) + @staticmethod + def fit_and_return_ensemble( + iteration: int, + end_at: float, + backend: Backend, + dataset_name: str, + task_type: int, + metric: Scorer, + pynisher_context: str, + ensemble_size: int = 10, + ensemble_nbest: int | float = 100, + max_models_on_disc: int | float | None = None, + seed: int = 1, + precision: int = 32, + memory_limit: int | None = None, + read_at_most: int | None = 5, + logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, + random_state: int | np.random.RandomState | None = None, + ) -> tuple[list[dict[str, Any]], int | float]: + """ + A short function to fit and create an ensemble. It is just a wrapper to easily + send a request to dask to create an ensemble and clean the memory when finished + + Parameters + ---------- + iteration: int + The current iteration + + end_at: float + At what time the job must finish. Needs to be the endtime and not the + time left because we do not know when dask schedules the job. + + backend: Backend + Backend to write and read files + + dataset_name: str + name of dataset + + task_type: int + type of ML task + + metric: Scorer + Metric to compute the loss of the given predictions + + pynisher_context: "fork" | "spawn" | "forkserver" = "fork" + Context to use for multiprocessing, can be either fork, spawn or forkserver. + + ensemble_size: int = 10 + Maximal size of ensemble + + ensemble_nbest: int | float = 1000 + If int: consider only the n best prediction + If float: consider only this fraction of the best models + If performance_range_threshold > 0, might return less models + + max_models_on_disc: int | float | None = 100 + Defines the maximum number of models that are kept in the disc. + + If int, it must be greater or equal than 1, and dictates the max number of + models to keep. + + If float, it will be interpreted as the max megabytes allowed of disc space. + That is, if the number of ensemble candidates require more disc space than + this float value, the worst models will be deleted to keep within this + budget. 
Models and predictions of the worst-performing models will be + deleted then. + + If None, the feature is disabled. + + seed: int = 1 + Seed used for training the models in the backend + + precision: 16 | 32 | 64 | 128 = 32 + Precision of floats to read the predictions + + memory_limit: int | None = None + Memory limit in mb. If ``None``, no memory limit is enforced. + + read_at_most: int = 5 + Read at most n new prediction files in each iteration -def fit_and_return_ensemble( - iteration: int, - end_at: float, - backend: Backend, - dataset_name: str, - task_type: int, - metric: Scorer, - pynisher_context: str, - ensemble_size: int = 10, - ensemble_nbest: int | float = 100, - max_models_on_disc: int | float | None = None, - seed: int = 1, - precision: int = 32, - memory_limit: int | None = None, - read_at_most: int | None = 5, - logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, - random_state: int | np.random.RandomState | None = None, -) -> tuple[list[dict[str, Any]], int | float]: - """ - - A short function to fit and create an ensemble. It is just a wrapper to easily send - a request to dask to create an ensemble and clean the memory when finished - - Parameters - ---------- - iteration: int - The current iteration - - end_at: float - At what time the job must finish. Needs to be the endtime and not the - time left because we do not know when dask schedules the job. - - backend: Backend - Backend to write and read files - - dataset_name: str - name of dataset - - task_type: int - type of ML task - - metric: Scorer - Metric to compute the loss of the given predictions - - pynisher_context: "fork" | "spawn" | "forkserver" = "fork" - Context to use for multiprocessing, can be either fork, spawn or forkserver. - - ensemble_size: int = 10 - Maximal size of ensemble - - ensemble_nbest: int | float = 1000 - If int: consider only the n best prediction - If float: consider only this fraction of the best models - If performance_range_threshold > 0, might return less models - - max_models_on_disc: int | float | None = 100 - Defines the maximum number of models that are kept in the disc. - - If int, it must be greater or equal than 1, and dictates the max number of - models to keep. - - If float, it will be interpreted as the max megabytes allowed of disc space. - That is, if the number of ensemble candidates require more disc space than - this float value, the worst models will be deleted to keep within this - budget. Models and predictions of the worst-performing models will be - deleted then. - - If None, the feature is disabled. - - seed: int = 1 - Seed used for training the models in the backend - - precision: 16 | 32 | 64 | 128 = 32 - Precision of floats to read the predictions - - memory_limit: int | None = None - Memory limit in mb. If ``None``, no memory limit is enforced. - - read_at_most: int = 5 - Read at most n new prediction files in each iteration - - logger_port: int = DEFAULT_TCP_LOGGING_PORT - The port where the logging server is listening to. - - random_state: int | RandomState | None = None - A random state used for the ensemble selection process. 
- - Returns - ------- - (ensemble_history: list[dict[str, Any]], nbest: int | float) - The ensemble history and the nbest chosen members - """ - result = EnsembleBuilder( - backend=backend, - dataset_name=dataset_name, - task_type=task_type, - metric=metric, - ensemble_size=ensemble_size, - ensemble_nbest=ensemble_nbest, - max_models_on_disc=max_models_on_disc, - seed=seed, - precision=precision, - memory_limit=memory_limit, - read_at_most=read_at_most, - random_state=random_state, - logger_port=logger_port, - ).run( - end_at=end_at, - iteration=iteration, - pynisher_context=pynisher_context, - ) - return result + logger_port: int = DEFAULT_TCP_LOGGING_PORT + The port where the logging server is listening to. + + random_state: int | RandomState | None = None + A random state used for the ensemble selection process. + + Returns + ------- + (ensemble_history: list[dict[str, Any]], nbest: int | float) + The ensemble history and the nbest chosen members + """ + result = EnsembleBuilder( + backend=backend, + dataset_name=dataset_name, + task_type=task_type, + metric=metric, + ensemble_size=ensemble_size, + ensemble_nbest=ensemble_nbest, + max_models_on_disc=max_models_on_disc, + seed=seed, + precision=precision, + memory_limit=memory_limit, + read_at_most=read_at_most, + random_state=random_state, + logger_port=logger_port, + ).run( + end_at=end_at, + iteration=iteration, + pynisher_context=pynisher_context, + ) + return result From d3da909a9c979cf8c897e4706d3b6589f19e6d97 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 22:24:09 +0200 Subject: [PATCH 090/117] Add: `make_ensemble_builder_manager` --- test/fixtures/ensemble_building.py | 63 +++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/test/fixtures/ensemble_building.py b/test/fixtures/ensemble_building.py index d0f8d6917a..53080383f0 100644 --- a/test/fixtures/ensemble_building.py +++ b/test/fixtures/ensemble_building.py @@ -2,9 +2,9 @@ from typing import Any, Callable -import sys import math import pickle +import sys from pathlib import Path import numpy as np @@ -12,7 +12,7 @@ from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.data.xy_data_manager import XYDataManager -from autosklearn.ensemble_building import EnsembleBuilder, Run +from autosklearn.ensemble_building import EnsembleBuilder, EnsembleBuilderManager, Run from autosklearn.metrics import Scorer, accuracy from pytest_cases import fixture @@ -144,3 +144,62 @@ def _make( return builder return _make + + +@fixture +def make_ensemble_builder_manager( + make_backend: Callable[..., Backend], + make_sklearn_dataset: Callable[..., XYDataManager], +) -> Callable[..., EnsembleBuilderManager]: + """Use `make_run` to create runs for this manager + + .. code:: python + + def test_x(make_run, make_ensemble_builder_manager): + manager = make_ensemble_builder(...) + + # Will use the backend to place runs correctly + runs = make_run(predictions={"ensemble": ...}, backend=manager.backend) + + # ... 
test stuff + + + """ + + def _make( + *, + backend: Backend | None = None, + dataset_name: str = "TEST", + task: int = BINARY_CLASSIFICATION, + metric: Scorer = accuracy, + random_state: int | np.random.RandomState | None = DEFAULT_SEED, + **kwargs: Any, + ) -> EnsembleBuilderManager: + if backend is None: + backend = make_backend() + + if not Path(backend._get_datamanager_pickle_filename()).exists(): + datamanager = make_sklearn_dataset( + name="breast_cancer", + task=BINARY_CLASSIFICATION, + feat_type="numerical", # They're all numerical + as_datamanager=True, + ) + backend.save_datamanager(datamanager) + + # Annoyingly, some places use datamanger, some places use the file + # Hence, we take the y_train of the datamanager and use that as the + # the targets + if "Y_train" in datamanager.data: + backend.save_targets_ensemble(datamanager.data["Y_train"]) + + return EnsembleBuilderManager( + backend=backend, + dataset_name=dataset_name, + task=task, + metric=metric, + random_state=random_state, + **kwargs, + ) + + return _make From d65f1ce1900bab16d638455397491e9992819dc0 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 22:28:30 +0200 Subject: [PATCH 091/117] Add: Test files for manager --- test/test_ensemble_builder/test_manager.py | 40 ++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 test/test_ensemble_builder/test_manager.py diff --git a/test/test_ensemble_builder/test_manager.py b/test/test_ensemble_builder/test_manager.py new file mode 100644 index 0000000000..e3078c1344 --- /dev/null +++ b/test/test_ensemble_builder/test_manager.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +from typing import Callable + +from autosklearn.automl import AutoML +from autosklearn.ensemble_building import EnsembleBuilderManager + +from pytest_cases import parametrize_with_cases +from unittest.mock import MagicMock, patch + +import test.test_automl.cases as cases +from test.conftest import DEFAULT_SEED + + +@parametrize_with_cases("automl", cases=cases, has_tag="fitted") +def case_real_runs( + automl: AutoML, + make_ensemble_builder_manager: Callable[..., EnsembleBuilderManager], +) -> EnsembleBuilderManager: + """Uses real runs from a fitted automl instance""" + manager = make_ensemble_builder_manager( + backend=automl._backend, + metric=automl._metric, + task=automl._task, + dataset_name=automl._dataset_name, + seed=automl._seed, + logger_port=automl._logger_port, + random_state=DEFAULT_SEED, + ) + return manager + + +@parametrize_with_cases("manager", cases=case_real_runs) +def test_run_builds_valid_ensemble(manager: EnsembleBuilderManager) -> None: + ... + + +@parametrize_with_cases("builder", cases=case_real_runs) +def test_main(builder: EnsembleBuilderManager) -> None: + ... 
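Because `fit_and_return_ensemble` is now a staticmethod, it can be submitted to a dask
client directly, without serialising a manager instance, exactly as `build_ensemble`
does in the patch above. A minimal sketch of that call pattern follows; the client
setup, the `backend` object and the concrete argument values are illustrative
assumptions, only the signature and the returned `(ensemble_history, nbest)` tuple
come from the code above:

.. code:: python

    import time

    from dask.distributed import Client

    from autosklearn.constants import BINARY_CLASSIFICATION
    from autosklearn.ensemble_building import EnsembleBuilderManager
    from autosklearn.metrics import accuracy

    client = Client(n_workers=1)  # assumed throw-away local client for illustration

    # `backend` is assumed to be an existing autosklearn Backend containing runs
    future = client.submit(
        EnsembleBuilderManager.fit_and_return_ensemble,
        iteration=0,
        end_at=time.time() + 60,  # absolute end time, not time left
        backend=backend,
        dataset_name="breast_cancer",
        task_type=BINARY_CLASSIFICATION,
        metric=accuracy,
        pynisher_context="fork",
    )

    ensemble_history, nbest = future.result()
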
From 7aced10d0a647cf63780eeec7d1d753a90521a0b Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 22:53:09 +0200 Subject: [PATCH 092/117] Add atomic rmtree --- autosklearn/util/disk.py | 41 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/autosklearn/util/disk.py b/autosklearn/util/disk.py index 14a8ee64f7..d52b7f26ef 100644 --- a/autosklearn/util/disk.py +++ b/autosklearn/util/disk.py @@ -1,6 +1,10 @@ from __future__ import annotations +from typing import Any + import math +import uuid +import shutil from pathlib import Path sizes = { @@ -41,3 +45,40 @@ def sizeof(path: Path | str, unit: str = "B") -> float: power = sizes[unit] return size / math.pow(1024, power) + + +def rmtree(path: Path | str, *, atomic: bool = False, **kwargs: Any) -> None: + """Delete a file or directory + + Parameters + ---------- + path: Path | str + The path to delete + + atomic: bool = False + Whether to delete the file/folder atomically. This is done using + `move` and `rmtree`. + + The deletion part is not guaranteed to be atomic but the folder + is highly likely to at least be renamed. + + The `move` is not guaranteed to be atomic either if moving between + different file systems which can happen when moving to /tmp, + depending on the OS and setup. + + * https://docs.python.org/3/library/shutil.html#shutil.move + + **kwargs + Forwarded to `rmtree` if `atmoic=True` + * https://docs.python.org/3/library/shutil.html#shutil.rmtree + """ + if isinstance(path, str): + path = Path(path) + + if atomic: + uid = uuid.uuid4() + mvpath = path.parent / f"{path.name}.old_{uid}" + shutil.move(str(path), str(mvpath)) + shutil.rmtree(mvpath, **kwargs) + else: + shutil.rmtree(mvpath, **kwargs) From d938b0052ca90ad91c1634d57cb2b41b5eb92f76 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 22:59:43 +0200 Subject: [PATCH 093/117] Add: atomic rmtree now accepts where mv should go --- autosklearn/util/disk.py | 43 +++++++++++++++++++++++++++++----------- 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/autosklearn/util/disk.py b/autosklearn/util/disk.py index d52b7f26ef..279a88f0ea 100644 --- a/autosklearn/util/disk.py +++ b/autosklearn/util/disk.py @@ -3,8 +3,9 @@ from typing import Any import math -import uuid import shutil +import tempfile +import uuid from pathlib import Path sizes = { @@ -47,7 +48,13 @@ def sizeof(path: Path | str, unit: str = "B") -> float: return size / math.pow(1024, power) -def rmtree(path: Path | str, *, atomic: bool = False, **kwargs: Any) -> None: +def rmtree( + path: Path | str, + *, + atomic: bool = False, + tmp: bool | Path | str = False, + **kwargs: Any, +) -> None: """Delete a file or directory Parameters @@ -56,29 +63,41 @@ def rmtree(path: Path | str, *, atomic: bool = False, **kwargs: Any) -> None: The path to delete atomic: bool = False - Whether to delete the file/folder atomically. This is done using - `move` and `rmtree`. + Whether to delete the file/folder atomically. This is done by first + using a `move` before `rmtree`. - The deletion part is not guaranteed to be atomic but the folder - is highly likely to at least be renamed. - - The `move` is not guaranteed to be atomic either if moving between + The `move` is not guaranteed to be atomic if moving between different file systems which can happen when moving to /tmp, depending on the OS and setup. + The deletion part is not atomic. 
+ * https://docs.python.org/3/library/shutil.html#shutil.move + tmp: bool | Path | str = False + If bool, this defines whether atomic should use the tmp dir + for it's move. Otherwise, a path can be specified to use + **kwargs - Forwarded to `rmtree` if `atmoic=True` + Forwarded to `rmtree` * https://docs.python.org/3/library/shutil.html#shutil.rmtree """ if isinstance(path, str): path = Path(path) if atomic: - uid = uuid.uuid4() - mvpath = path.parent / f"{path.name}.old_{uid}" + if tmp is True: + dir = Path(tempfile.gettempdir()) + uid = uuid.uuid4() + mvpath = dir / f"autosklearn-{path.name}.old_{uid}" + + elif tmp is False: + uid = uuid.uuid4() + mvpath = path.parent / f"{path.name}.old_{uid}" + else: + mvpath = tmp if isinstance(tmp, Path) else Path(tmp) + shutil.move(str(path), str(mvpath)) shutil.rmtree(mvpath, **kwargs) else: - shutil.rmtree(mvpath, **kwargs) + shutil.rmtree(path, **kwargs) From 449a67aa2d6cc9dab93ad380a4bb27a83c55e89f Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Tue, 19 Apr 2022 23:01:12 +0200 Subject: [PATCH 094/117] Make builder use atomic rmtree --- autosklearn/ensemble_building/builder.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 98b6955632..54ff46c9c1 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -7,7 +7,6 @@ import numbers import os import pickle -import shutil import time import traceback from itertools import accumulate @@ -26,9 +25,16 @@ from autosklearn.util.functional import cut, findwhere, split from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules +from autosklearn.util.disk import rmtree class EnsembleBuilder: + """Builds ensembles out of runs that exist in the Backend + + This is used by EnsembleBuilderManager and created in a dask-client + every time a run finishes and there is currently no EnsembleBuilder active. 
+ """ + def __init__( self, backend: Backend, @@ -904,7 +910,7 @@ def delete_runs(self, runs: Iterable[Run]) -> None: items = iter(run for run in runs if not run.is_dummy() and run.dir.exists()) for run in items: try: - shutil.rmtree(run.dir) + rmtree(run.dir, atomic=True) self.logger.info(f"Deleted files for {run}") except Exception as e: self.logger.error(f"Failed to delete files for {run}: \n{e}") From 73978a0f675a80f88fc2a6c848b81381da5a30e3 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 2 May 2022 15:29:20 +0200 Subject: [PATCH 095/117] Fix import bugs, remove valid preds in builder --- autosklearn/automl.py | 4 +- autosklearn/ensemble_building/builder.py | 77 +++++++++---------- autosklearn/ensemble_building/run.py | 29 ++++--- autosklearn/util/functional.py | 2 +- test/test_automl/test_outputs.py | 15 +++- .../test_ensemble_builder.py | 18 ++--- test/test_estimators/test_estimators.py | 6 +- 7 files changed, 84 insertions(+), 67 deletions(-) diff --git a/autosklearn/automl.py b/autosklearn/automl.py index 920880eaf0..3f2fa81f30 100644 --- a/autosklearn/automl.py +++ b/autosklearn/automl.py @@ -798,7 +798,7 @@ def fit( precision=self.precision, max_iterations=None, read_at_most=np.inf, - ensemble_memory_limit=self._memory_limit, + memory_limit=self._memory_limit, random_state=self._seed, logger_port=self._logger_port, pynisher_context=self._multiprocessing_context, @@ -911,7 +911,7 @@ def fit( ) result = proc_ensemble.futures.pop().result() if result: - ensemble_history, _, _, _, _ = result + ensemble_history, _ = result self.ensemble_performance_history.extend(ensemble_history) self._logger.info("Ensemble script finished, continue shutdown.") diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 54ff46c9c1..75c8e2fb7c 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -22,10 +22,12 @@ from autosklearn.ensemble_building.run import Run, RunID from autosklearn.ensembles.ensemble_selection import EnsembleSelection from autosklearn.metrics import Scorer, calculate_loss, calculate_score +from autosklearn.util.disk import rmtree from autosklearn.util.functional import cut, findwhere, split from autosklearn.util.logging_ import get_named_client_logger from autosklearn.util.parallel import preload_modules -from autosklearn.util.disk import rmtree + +CANDIDATES_FILENAME = "previous_ensemble_building_candidates.pkl" class EnsembleBuilder: @@ -175,15 +177,13 @@ def __init__( # Data we may need datamanager: XYDataManager = self.backend.load_datamanager() - self._y_valid: np.ndarray | None = datamanager.data.get("Y_valid", None) self._y_test: np.ndarray | None = datamanager.data.get("Y_test", None) self._y_ensemble: np.ndarray | None = None @property def previous_candidates_path(self) -> Path: """Path to the cached losses we store between runs""" - fname = "previous_ensemble_building_candidates.pkl" - return Path(self.backend.internals_directory) / fname + return Path(self.backend.internals_directory) / CANDIDATES_FILENAME def previous_candidates(self) -> dict[RunID, Run]: """Load any previous candidates that were saved from previous runs @@ -228,9 +228,6 @@ def targets(self, kind: str = "ensemble") -> np.ndarray | None: self._y_ensemble = self.backend.load_targets_ensemble() return self._y_ensemble - elif kind == "valid": - return self._y_valid - elif kind == "test": return self._y_test @@ -269,7 +266,7 @@ def run( Returns ------- - (ensemble_history, nbest, train_preds, valid_preds, 
test_preds) + (ensemble_history, nbest) """ if time_left is None and end_at is None: raise ValueError("Must provide either time_left or end_at.") @@ -435,6 +432,7 @@ def main( # Get the dummy and real runs dummies, candidates = split(runs, by=lambda r: r.is_dummy()) + print(dummies, candidates) # We see if we need to delete any of the real runs before we waste compute # on evaluating their candidacy for ensemble building @@ -467,43 +465,19 @@ def main( candidates = dummies self.logger.warning("No real runs to build ensemble from") - # Get a set representation of them as we will begin doing intersections - # Not here that valid_set and test_set are both subsets of candidates_set - # ... then find intersect and use that to fit the ensemble + # If there's an intersect with models that have some predictions on the + # test subset, use that subset, otherwise use all of the candidates candidates_set = set(candidates) - valid_subset = {r for r in candidates if r.pred_path("valid").exists()} test_subset = {r for r in candidates if r.pred_path("test").exists()} - intersect = valid_subset & test_subset - if len(intersect) == 0 and len(test_subset) > 0 and len(valid_subset) > 0: - self.logger.error("valid_set and test_set not empty but do not overlap") - raise RuntimeError("valid_set and test_set not empty but do not overlap") - - # Try to use the runs which have the most kinds of preds, otherwise just use all - if len(intersect) > 0: - candidates = sorted(intersect, key=lambda r: r.id) - valid_models = candidates - test_models = candidates - - self.delete_runs(candidates_set - intersect) - - elif len(valid_subset) > 0: - candidates = sorted(valid_subset, key=lambda r: r.id) - valid_models = candidates - test_models = [] - - self.delete_runs(candidates_set - valid_subset) - - elif len(test_subset) > 0: + if len(test_subset) > 0: candidates = sorted(test_subset, key=lambda r: r.id) - valid_models = [] test_models = candidates self.delete_runs(candidates_set - test_subset) else: candidates = sorted(candidates_set, key=lambda r: r.id) - valid_models = [] test_models = [] # To save on pickle and to allow for fresh predictions, unload the cache @@ -550,7 +524,6 @@ def main( for kind, score_name, models in [ ("ensemble", "optimization", candidates), - ("valid", "val", valid_models), ("test", "test", test_models), ]: if len(models) == 0: @@ -561,9 +534,7 @@ def main( self.logger.warning(f"No ensemble targets for {kind}") continue - run_preds = [ - r.predictions(kind, precision=self.precision) for r in models - ] + run_preds = [r.predictions(kind, precision=self.precision) for r in models] pred = ensemble.predict(run_preds) # Pretty sure this whole step is uneeded but left over and afraid @@ -761,12 +732,34 @@ def fit_ensemble( precision: int | None = None, random_state: int | np.random.RandomState | None = None, ) -> EnsembleSelection: - """TODO + """Fit an ensemble from the provided runs. 
+ + Note + ---- + Expects all runs to have the "ensemble" predictions present Parameters ---------- - selected_keys: list[str] - List of selected keys of self.runs + runs: list[Run] + List of runs to build an ensemble from + + targets: np.ndarray + The targets to build the ensemble with + + size: int | None = None + The size of the ensemble to build + + task: int | None = None + The kind of task performed + + metric: Scorer | None = None + The metric to use when comparing run predictions to the targets + + precision: int | None = None + The precision with which to load run predictions + + random_state: int | RandomState | None = None + The random state to use Returns ------- diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index 3afbe70e69..36b9fdca89 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -2,8 +2,8 @@ from typing import Tuple -from pathlib import Path import re +from pathlib import Path import numpy as np @@ -15,7 +15,7 @@ class Run: """Class for storing information about a run used during ensemble building""" - re_model_dir = r'^([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*)$' + RE_MODEL_DIR = r"^([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*)$" def __init__(self, path: Path) -> None: """Creates a Run from a path point to the directory of a run @@ -44,7 +44,7 @@ def __init__(self, path: Path) -> None: # Items that will be delete when the run is saved back to file self._cache: dict[str, np.ndarray] = {} - # The recorded time of ensemble/test/valid predictions modified + # The recorded time of ensemble/test predictions modified self.recorded_mtimes: dict[str, float] = {} self.record_modified_times() @@ -74,11 +74,25 @@ def pred_path(self, kind: str = "ensemble") -> Path: def record_modified_times(self) -> None: """Records the last time each prediction file type was modified, if it exists""" self.recorded_mtimes = {} - for kind in ["ensemble", "valid", "test"]: + for kind in ["ensemble", "test"]: path = self.pred_path(kind) # type: ignore if path.exists(): self.recorded_mtimes[kind] = path.stat().st_mtime + def has_predictions(self, kind: str = "ensemble") -> bool: + """ + Parameters + ---------- + kind: "ensemble" | "test" = "ensemble" + The kind of predictions to query for + + Returns + ------- + bool + Whether this run has the kind of predictions queried for + """ + return self.pred_path(kind).exists() + def predictions( self, kind: str = "ensemble", @@ -88,7 +102,7 @@ def predictions( Parameters ---------- - kind : "ensemble" | "test" | "valid" + kind : "ensemble" | "test" The kind of predictions to load precisions : type | None = None @@ -105,9 +119,6 @@ def predictions( path = self.pred_path(kind) - if not path.exists(): - raise RuntimeError(f"No predictions for {kind}") - with path.open("rb") as f: # TODO: We should probably remove this requirement. 
I'm not sure why model # predictions are being saved as pickled @@ -158,4 +169,4 @@ def valid(path: Path) -> bool: bool Whether the path is a valid run dir """ - return re.match(Run.re_model_dir, path.name) is not None + return re.match(Run.RE_MODEL_DIR, path.name) is not None diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index 59604331be..043c578f09 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -3,7 +3,7 @@ from typing import Callable, Iterable, TypeVar from functools import reduce -from itertools import tee, chain +from itertools import chain, tee import numpy as np diff --git a/test/test_automl/test_outputs.py b/test/test_automl/test_outputs.py index 458347c145..ba005019bd 100644 --- a/test/test_automl/test_outputs.py +++ b/test/test_automl/test_outputs.py @@ -1,6 +1,7 @@ from pathlib import Path from autosklearn.automl import AutoML +from autosklearn.ensemble_building.builder import CANDIDATES_FILENAME from pytest import mark from pytest_cases import parametrize_with_cases @@ -67,10 +68,9 @@ def test_paths_created_with_ensemble(automl: AutoML) -> None: expected = [ partial / fixture for fixture in ( - "ensemble_read_preds.pkl", - "ensemble_read_losses.pkl", "ensembles", "ensemble_history.json", + CANDIDATES_FILENAME, ) ] @@ -80,6 +80,12 @@ def test_paths_created_with_ensemble(automl: AutoML) -> None: @parametrize_with_cases("automl", cases=cases, has_tag="fitted") def test_at_least_one_model_and_predictions(automl: AutoML) -> None: + """ + Expects + ------- + * There should be at least one models saved + * Each model saved should have predictions for the ensemble + """ assert automl._backend is not None runs_dir = Path(automl._backend.get_runs_directory()) @@ -100,6 +106,11 @@ def test_at_least_one_model_and_predictions(automl: AutoML) -> None: @parametrize_with_cases("automl", cases=cases, filter=has_ensemble) def test_at_least_one_ensemble(automl: AutoML) -> None: + """ + Expects + ------- + * There should be at least one ensemble generated + """ assert automl._backend is not None ens_dir = Path(automl._backend.get_ensemble_dir()) diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 5c41253d5d..9a3e2a374f 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -11,11 +11,10 @@ from autosklearn.ensemble_building import EnsembleBuilder, Run from autosklearn.util.functional import bound, pairs +import pytest from pytest_cases import fixture, parametrize from unittest.mock import patch -from test.util import fails - @fixture def builder(make_ensemble_builder: Callable[..., EnsembleBuilder]) -> EnsembleBuilder: @@ -23,7 +22,7 @@ def builder(make_ensemble_builder: Callable[..., EnsembleBuilder]) -> EnsembleBu return make_ensemble_builder() -@parametrize("kind", ["ensemble", fails("valid", "Not supported anymore?"), "test"]) +@parametrize("kind", ["ensemble", "test"]) def test_targets(builder: EnsembleBuilder, kind: str) -> None: """ Expects @@ -74,6 +73,7 @@ def test_available_runs_with_bad_dir_contained(builder: EnsembleBuilder) -> None available_runs = builder.available_runs() assert len(available_runs) == len(paths) + def test_requires_loss_update_with_modified_runs( builder: EnsembleBuilder, make_run: Callable[..., Run], @@ -412,7 +412,7 @@ def test_requires_memory_limit( assert not any(run.loss > best_deleted for run in keep) -@parametrize("kind", ["ensemble", "valid", "test"]) 
+@parametrize("kind", ["ensemble", "test"]) def test_loss_with_no_ensemble_targets( builder: EnsembleBuilder, make_run: Callable[..., Run], @@ -428,7 +428,7 @@ def test_loss_with_no_ensemble_targets( assert builder.loss(run, kind=kind) == np.inf -@parametrize("kind", ["ensemble", fails("valid", "Not supported anymore?"), "test"]) +@parametrize("kind", ["ensemble", "test"]) def test_loss_with_targets( builder: EnsembleBuilder, make_run: Callable[..., Run], @@ -513,8 +513,8 @@ def test_fit_with_error_gives_no_ensemble( """ Expects ------- - * A run without predictions will raise an error, causing the `fit_ensemble` to fail - and return None + * A run without predictions will raise an error will cause `fit_ensemble` to fail + as it requires all runs to have valid predictions """ targets = builder.targets("ensemble") assert targets is not None @@ -526,8 +526,8 @@ def test_fit_with_error_gives_no_ensemble( runs.append(bad_run) - ensemble = builder.fit_ensemble(runs, targets) - assert ensemble is None + with pytest.raises(FileNotFoundError): + builder.fit_ensemble(runs, targets) @parametrize("time_buffer", [1, 5]) diff --git a/test/test_estimators/test_estimators.py b/test/test_estimators/test_estimators.py index 0962179f34..730647f948 100644 --- a/test/test_estimators/test_estimators.py +++ b/test/test_estimators/test_estimators.py @@ -28,7 +28,7 @@ import autosklearn.pipeline.util as putil from autosklearn.automl import AutoMLClassifier from autosklearn.data.validation import InputValidator -from autosklearn.ensemble_building.builder import MODEL_FN_RE +from autosklearn.ensemble_building.run import Run from autosklearn.estimators import ( AutoSklearnClassifier, AutoSklearnEstimator, @@ -110,7 +110,9 @@ def __call__(self, *args, **kwargs): seeds = set() for prediction in predictions: prediction = os.path.split(prediction)[1] - match = re.match(MODEL_FN_RE, prediction.replace("predictions_ensemble", "")) + match = re.match( + Run.RE_MODEL_DIR, prediction.replace("predictions_ensemble", "") + ) if match: num_run = int(match.group(2)) available_predictions.add(num_run) From bf0e2dbfe91afe33d11a854bf9909cfd5fabe627 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Mon, 2 May 2022 15:50:26 +0200 Subject: [PATCH 096/117] Remove `np.inf` as valid arg for `read_at_most` --- autosklearn/automl.py | 4 ++-- autosklearn/ensemble_building/builder.py | 7 ++++--- autosklearn/ensemble_building/manager.py | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/autosklearn/automl.py b/autosklearn/automl.py index 3f2fa81f30..b1b9183cd1 100644 --- a/autosklearn/automl.py +++ b/autosklearn/automl.py @@ -797,7 +797,7 @@ def fit( seed=self._seed, precision=self.precision, max_iterations=None, - read_at_most=np.inf, + read_at_most=None, memory_limit=self._memory_limit, random_state=self._seed, logger_port=self._logger_port, @@ -1499,7 +1499,7 @@ def fit_ensemble( seed=self._seed, precision=precision if precision else self.precision, max_iterations=1, - read_at_most=np.inf, + read_at_most=None, memory_limit=self._memory_limit, random_state=self._seed, logger_port=self._logger_port, diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 75c8e2fb7c..428cb2bc4e 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -132,8 +132,8 @@ def __init__( if max_models_on_disc is not None and max_models_on_disc < 0: raise ValueError("max_models_on_disc must be positive or None") - if read_at_most is not None and 
read_at_most < 1: - raise ValueError("Read at most must be greater than 1") + if read_at_most is not None and (read_at_most < 1 or read_at_most == np.inf): + raise ValueError("Read at most must be int greater than 1 or None") # Setup the logger self.logger = get_named_client_logger(name="EnsembleBuilder", port=logger_port) @@ -423,7 +423,8 @@ def main( # Calculate the loss for those that require it requires_update = self.requires_loss_update(runs) - if self.read_at_most: + print("HERE----\n\n", str(self.read_at_most)) + if self.read_at_most is not None: requires_update = requires_update[: self.read_at_most] for run in requires_update: diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index 958ecb548c..ab9e7f428c 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -339,7 +339,7 @@ def fit_and_return_ensemble( memory_limit: int | None = None Memory limit in mb. If ``None``, no memory limit is enforced. - read_at_most: int = 5 + read_at_most: int | None = 5 Read at most n new prediction files in each iteration logger_port: int = DEFAULT_TCP_LOGGING_PORT From 26e9d4926058bc0edbd6c9aae6909ddfaeabbd5f Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 4 May 2022 13:15:46 +0200 Subject: [PATCH 097/117] Possible reproducible num_run, no predictions error --- autosklearn/automl.py | 15 ++- autosklearn/ensemble_building/builder.py | 19 ++-- autosklearn/ensemble_building/manager.py | 14 +-- autosklearn/ensemble_building/run.py | 6 ++ test/fixtures/automl.py | 22 +++-- test/fixtures/dask.py | 41 +++++---- test/fixtures/ensemble_building.py | 18 ++++ .../test_ensemble_builder_real.py | 92 +++++++++++++------ test/test_ensemble_builder/test_manager.py | 1 - test/test_estimators/test_estimators.py | 19 ++-- 10 files changed, 167 insertions(+), 80 deletions(-) diff --git a/autosklearn/automl.py b/autosklearn/automl.py index b1b9183cd1..d9eb60f8d4 100644 --- a/autosklearn/automl.py +++ b/autosklearn/automl.py @@ -297,6 +297,8 @@ def __init__( self._label_num = None self._parser = None self._can_predict = False + self._read_at_most = None + self._max_ensemble_build_iterations = None self.models_: Optional[dict] = None self.cv_models_: Optional[dict] = None self.ensemble_ = None @@ -796,8 +798,8 @@ def fit( max_models_on_disc=self._max_models_on_disc, seed=self._seed, precision=self.precision, - max_iterations=None, - read_at_most=None, + max_iterations=self._max_ensemble_build_iterations, + read_at_most=self._read_at_most, memory_limit=self._memory_limit, random_state=self._seed, logger_port=self._logger_port, @@ -2048,6 +2050,15 @@ def has_key(rv, key): return ensemble_dict + def has_ensemble(self) -> bool: + """ + Returns + ------- + bool + Whether this AutoML instance has an ensemble + """ + return self.ensemble_ is not None + def _create_search_space( self, tmp_dir: str, diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 428cb2bc4e..8b2cb9ef69 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -50,7 +50,7 @@ def __init__( seed: int = 1, precision: int = 32, memory_limit: int | None = 1024, - read_at_most: int | None = 5, + read_at_most: int | None = None, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, random_state: int | np.random.RandomState | None = None, ): @@ -157,12 +157,12 @@ def __init__( self.ensemble_nbest = ensemble_nbest self.performance_range_threshold = 
performance_range_threshold - # Decide if self.max_models_on_disk is a memory limit or model limit + # Decide if self.max_models_on_disc is a memory limit or model limit self.max_models_on_disc: int | None = None self.model_memory_limit: float | None = None if isinstance(max_models_on_disc, int): - self.max_models_on_disk = self.max_models_on_disc + self.max_models_on_disc = self.max_models_on_disc elif isinstance(self.max_models_on_disc, float): self.model_memory_limit = self.max_models_on_disc @@ -423,7 +423,6 @@ def main( # Calculate the loss for those that require it requires_update = self.requires_loss_update(runs) - print("HERE----\n\n", str(self.read_at_most)) if self.read_at_most is not None: requires_update = requires_update[: self.read_at_most] @@ -433,14 +432,13 @@ def main( # Get the dummy and real runs dummies, candidates = split(runs, by=lambda r: r.is_dummy()) - print(dummies, candidates) # We see if we need to delete any of the real runs before we waste compute # on evaluating their candidacy for ensemble building if any(candidates): candidates, to_delete = self.requires_deletion( candidates, - max_models=self.max_models_on_disk, + max_models=self.max_models_on_disc, memory_limit=self.model_memory_limit, ) @@ -497,9 +495,9 @@ def main( current_candidate_ids = set(run.id for run in candidates) difference = previous_candidate_ids ^ current_candidate_ids - updated_candidates = iter(run in candidates for run in requires_update) + was_updated_candidates = list(run in candidates for run in requires_update) - if not any(difference) and not any(updated_candidates): + if not any(difference) and not any(was_updated_candidates): self.logger.info("All ensemble candidates the same, no update required") return self.ensemble_history, self.ensemble_nbest @@ -518,7 +516,9 @@ def main( self.logger.info(str(ensemble)) ens_perf = ensemble.get_validation_performance() self.validation_performance_ = min(self.validation_performance_, ens_perf) - self.backend.save_ensemble(ensemble, iteration, self.seed) # type: ignore + self.backend.save_ensemble( + ensemble=ensemble, idx=iteration, seed=self.seed # type: ignore + ) # Continue with evaluating the ensemble after making some space performance_stamp = {"Timestamp": pd.Timestamp.now()} @@ -901,6 +901,7 @@ def delete_runs(self, runs: Iterable[Run]) -> None: runs : Sequence[Run] The runs to delete """ + print("deleted", runs) items = iter(run for run in runs if not run.is_dummy() and run.dir.exists()) for run in items: try: diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index ab9e7f428c..d79f2bcd80 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -35,7 +35,7 @@ def __init__( seed: int = 1, precision: int = 32, memory_limit: int | None = None, - read_at_most: int | None = 5, + read_at_most: int | None = None, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, random_state: int | np.random.RandomState | None = None, start_time: float | None = None, @@ -99,7 +99,7 @@ def __init__( memory_limit: int | None = None Memory limit in mb. If ``None``, no memory limit is enforced. 
- read_at_most: int = 5 + read_at_most: int | None = 5 Read at most n new prediction files in each iteration logger_port: int = DEFAULT_TCP_LOGGING_PORT @@ -196,13 +196,10 @@ def build_ensemble( if self.futures[0].done(): result = self.futures.pop().result() if result: - ensemble_history, self.ensemble_nbest, _, _, _ = result + ensemble_history, self.ensemble_nbest = result logger.debug( - "iteration={} @ elapsed_time={} has history={}".format( - self.iteration, - elapsed_time, - ensemble_history, - ) + f"iteration={self.iteration} @ elapsed_time={elapsed_time}" + f" has history={ensemble_history}" ) self.history.extend(ensemble_history) @@ -238,7 +235,6 @@ def build_ensemble( random_state=self.random_state, end_at=self.start_time + self.time_left_for_ensembles, iteration=self.iteration, - priority=100, pynisher_context=self.pynisher_context, logger_port=self.logger_port, ) diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index 36b9fdca89..6b30dcfb6b 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -15,6 +15,12 @@ class Run: """Class for storing information about a run used during ensemble building""" + # For matching prediction files + RE_MODEL_PREDICTION_FILE = ( + r"^predictions_ensemble_([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*).npy$" + ) + + # For matching run directories RE_MODEL_DIR = r"^([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*)$" def __init__(self, path: Path) -> None: diff --git a/test/fixtures/automl.py b/test/fixtures/automl.py index ced4297050..726424b9a0 100644 --- a/test/fixtures/automl.py +++ b/test/fixtures/automl.py @@ -7,7 +7,7 @@ from autosklearn.automl import AutoML, AutoMLClassifier, AutoMLRegressor from autosklearn.automl_common.common.utils.backend import Backend -from pytest import fixture +from pytest import FixtureRequest, fixture from unittest.mock import Mock from test.conftest import DEFAULT_SEED @@ -16,6 +16,7 @@ def _create_automl( automl_type: Type[AutoML] = AutoML, + _id: str | None = None, **kwargs: Any, ) -> AutoML: """ @@ -25,6 +26,10 @@ def _create_automl( automl_type : Type[AutoML] = AutoML The type of AutoML object to use + _id: str | None = None + If no dask client is provided, a unique id is required to create one + so that it can be shut down after the test ends + **kwargs: Any Options to pass on to the AutoML type for construction @@ -53,7 +58,8 @@ def _create_automl( opts: Dict[str, Any] = {**test_defaults, **kwargs} if "dask_client" not in opts: - client = create_test_dask_client(n_workers=opts["n_jobs"]) + assert _id is not None + client = create_test_dask_client(id=_id, n_workers=opts["n_jobs"]) opts["dask_client"] = client auto = automl_type(**opts) @@ -61,21 +67,21 @@ def _create_automl( @fixture -def make_automl() -> Callable[..., Tuple[AutoML, Callable]]: +def make_automl(request: FixtureRequest) -> Callable[..., Tuple[AutoML, Callable]]: """See `_create_automl`""" - yield partial(_create_automl, automl_type=AutoML) + yield partial(_create_automl, automl_type=AutoML, _id=request.node.nodeid) @fixture -def make_automl_classifier() -> Callable[..., AutoMLClassifier]: +def make_automl_classifier(request: FixtureRequest) -> Callable[..., AutoMLClassifier]: """See `_create_automl`""" - yield partial(_create_automl, automl_type=AutoMLClassifier) + yield partial(_create_automl, automl_type=AutoMLClassifier, _id=request.node.nodeid) @fixture -def make_automl_regressor() -> Callable[..., AutoMLRegressor]: +def make_automl_regressor(request: FixtureRequest) -> 
Callable[..., AutoMLRegressor]: """See `_create_automl`""" - yield partial(_create_automl, automl_type=AutoMLRegressor) + yield partial(_create_automl, automl_type=AutoMLRegressor, _id=request.node.nodeid) class AutoMLStub(AutoML): diff --git a/test/fixtures/dask.py b/test/fixtures/dask.py index 0c1f112800..40a113e9d6 100644 --- a/test/fixtures/dask.py +++ b/test/fixtures/dask.py @@ -2,6 +2,8 @@ from typing import Callable +from functools import partial + from dask.distributed import Client, get_client from pytest import FixtureRequest, fixture @@ -13,31 +15,36 @@ @fixture(autouse=True) def clean_up_any_dask_clients(request: FixtureRequest) -> None: """Auto injected fixture to close dask clients after each test""" - yield - if any(active_clients): - for adr in list(active_clients.keys()): - if request.config.getoption("verbose") > 1: - print(f"\nFixture closing dask_client at {adr}") + yield # Give control to the function + + # Initiate cleanup + id = request.node.nodeid + if id in active_clients: + if request.config.getoption("verbose") > 1: + print(f"\nFixture closing dask_client for {id}") - close = active_clients[adr] - close() - del active_clients[adr] + close = active_clients[id] + close() -def create_test_dask_client(n_workers: int = 2) -> Client: +def create_test_dask_client( + id: str, + n_workers: int = 2, +) -> Client: """Factory to make a Dask client and a function to close it - them + them. Parameters ---------- + id: str + An id to associate with this dask client + n_workers: int = 2 - inside asklea - inside AutoML. Returns ------- - Client, Callable - The client and a function to call to close that client + Client + The client """ # Workers are in subprocesses to not create deadlocks with the pynisher # and logging. @@ -57,13 +64,13 @@ def close() -> None: except Exception: pass - active_clients[adr] = close + active_clients[id] = close return client @fixture -def make_dask_client() -> Callable[[int], Client]: +def make_dask_client(request: FixtureRequest) -> Callable[[int], Client]: """Factory to make a Dask client and a function to close it Parameters @@ -76,7 +83,7 @@ def make_dask_client() -> Callable[[int], Client]: Client, Callable The client and a function to call to close that client """ - return create_test_dask_client + return partial(create_test_dask_client, id=request.node.nodeid) # TODO remove in favour of make_dask_client diff --git a/test/fixtures/ensemble_building.py b/test/fixtures/ensemble_building.py index 53080383f0..43924c5465 100644 --- a/test/fixtures/ensemble_building.py +++ b/test/fixtures/ensemble_building.py @@ -9,6 +9,7 @@ import numpy as np +from autosklearn.automl import AutoML from autosklearn.automl_common.common.utils.backend import Backend from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.data.xy_data_manager import XYDataManager @@ -103,6 +104,7 @@ def make_ensemble_builder( ) -> Callable[..., EnsembleBuilder]: def _make( *, + automl: AutoML | None = None, previous_candidates: list[Run] | None = None, backend: Backend | None = None, dataset_name: str = "TEST", @@ -111,9 +113,25 @@ def _make( **kwargs: Any, ) -> EnsembleBuilder: + if automl: + backend = automl._backend + dataset_name = automl._dataset_name + task_type = automl._task + metric = automl._metric + kwargs = { + "ensemble_size": automl._ensemble_size, + "ensemble_nbest": automl._ensemble_nbest, + "max_models_on_disc": automl._max_models_on_disc, + "precision": automl.precision, + "read_at_most": automl._read_at_most, + "memory_limit": automl._memory_limit, + 
"logger_port": automl._logger_port, + } + if backend is None: backend = make_backend() + # If there's no datamanager, just try populate it with some generic one, if not Path(backend._get_datamanager_pickle_filename()).exists(): datamanager = make_sklearn_dataset( name="breast_cancer", diff --git a/test/test_ensemble_builder/test_ensemble_builder_real.py b/test/test_ensemble_builder/test_ensemble_builder_real.py index 040599f5ce..9f44c83472 100644 --- a/test/test_ensemble_builder/test_ensemble_builder_real.py +++ b/test/test_ensemble_builder/test_ensemble_builder_real.py @@ -1,3 +1,6 @@ +""" +This file tests the ensemble builder with real runs generated from running AutoML +""" from __future__ import annotations from typing import Callable @@ -5,28 +8,29 @@ from autosklearn.automl import AutoML from autosklearn.ensemble_building.builder import EnsembleBuilder +import pytest from pytest_cases import parametrize_with_cases +from pytest_cases.filters import has_tag from unittest.mock import MagicMock, patch import test.test_automl.cases as cases -from test.conftest import DEFAULT_SEED -@parametrize_with_cases("automl", cases=cases, has_tag="fitted") +@parametrize_with_cases( + "automl", + cases=cases, + filter=has_tag("fitted") & ~has_tag("no_ensemble"), +) def case_real_runs( automl: AutoML, make_ensemble_builder: Callable[..., EnsembleBuilder], ) -> EnsembleBuilder: - """Uses real runs from a fitted automl instance""" - builder = make_ensemble_builder( - backend=automl._backend, - metric=automl._metric, - task_type=automl._task, - dataset_name=automl._dataset_name, - seed=automl._seed, - logger_port=automl._logger_port, - random_state=DEFAULT_SEED, - ) + """Uses real runs from a fitted automl instance which have an ensemble + + This will copy the ensemble builder based on the AutoML instance parameterss. This + includes ensemble_nbest, ensemble_size, etc... + """ + builder = make_ensemble_builder(automl=automl) return builder @@ -35,11 +39,18 @@ def test_run_builds_valid_ensemble(builder: EnsembleBuilder) -> None: """ Expects ------- - * The history returned should not be empty + * Using the same builder as used in the real run should result in the same + candidate models for the ensemble. 
+ * Check that there is no overlap between candidate models and those deleted * The generated ensemble should not be empty - * If any deleted, should be no overlap with those deleted and ensemble + * If any deleted, should be no overlap with those deleted and those in ensemble * If any deleted, they should all be worse than those in the ensemble """ + # We need to clear previous candidates so the ensemble builder is presented with + # only "new" runs and has no information of previous candidates + if builder.previous_candidates_path.exists(): + builder.previous_candidates_path.unlink() + # So we can capture the saved ensemble mock_save = MagicMock() builder.backend.save_ensemble = mock_save # type: ignore @@ -52,24 +63,32 @@ def test_run_builds_valid_ensemble(builder: EnsembleBuilder) -> None: with patch.object(builder, "fit_ensemble", wraps=builder.fit_ensemble) as mock_fit: history, nbest = builder.main() - assert history is not None + # Check the ensemble was fitted once + mock_save.assert_called_once() + _, kwargs = mock_save.call_args + ens = kwargs["ensemble"] # `backend.save_ensemble(ens, ...)` + ensemble_ids = set(ens.get_selected_model_identifiers()) + assert len(ensemble_ids) > 0 - ens, _, _ = mock_save.call_args[0] - assert len(ens.get_selected_model_identifiers()) > 0 + # Check that the ids of runs in the ensemble were all candidates + candidates = mock_fit.call_args[0][0] # `fit_ensemble(candidates, ...)` + candidate_ids = {run.id for run in candidates} + assert ensemble_ids <= candidate_ids - ens_ids = set(ens.get_selected_model_identifiers()) - deleted = mock_delete.call_args[0][0] + args, _ = mock_delete.call_args + deleted = args[0] # `delete_runs(runs)` - # If we deleted runs, we better make sure they're worse than what's - # in the ensemble + # If we deleted runs, we better make sure of a few things if len(deleted) > 0: deleted_ids = {run.id for run in deleted} - assert len(ens_ids & deleted_ids) == 0 - ensemble_candidates = mock_fit.call_args[0][0] + # Make sure theres no overlap between candidates/ensemble and those deleted + assert not any(deleted_ids & candidate_ids) + assert not any(deleted_ids & ensemble_ids) + # Make sure that the best deleted model is better than the worst candidate best_deleted = min(deleted, key=lambda r: (r.loss, r.num_run)) - worst_candidate = max(ensemble_candidates, key=lambda r: (r.loss, r.num_run)) + worst_candidate = max(candidates, key=lambda r: (r.loss, r.num_run)) a = (worst_candidate.loss, worst_candidate.num_run) b = (best_deleted.loss, best_deleted.num_run) @@ -77,6 +96,25 @@ def test_run_builds_valid_ensemble(builder: EnsembleBuilder) -> None: @parametrize_with_cases("builder", cases=case_real_runs) -def test_main(builder: EnsembleBuilder) -> None: - result = builder.run(1, time_left=10) - raise ValueError(x) +def test_does_not_update_ensemble_with_no_new_runs(builder: EnsembleBuilder) -> None: + """ + Expects + ------- + * No new ensemble should be fitted with no new runs and no runs updated. + Since this is from a real AutoML run, running the builder again without having + trained any new models should mean that the `fit_ensemble` is never run in the + EnsembleBuilder. 
+ """ + if not builder.previous_candidates_path.exists(): + pytest.skip("Test only valid when builder has previous candidates") + + prev_history = builder.ensemble_history + prev_nbest = builder.ensemble_nbest + + # So we can wrap and test if fit ensemble gets called + with patch.object(builder, "fit_ensemble", wraps=builder.fit_ensemble) as mock_fit: + history, nbest = builder.main() + + assert history == prev_history + assert prev_nbest == nbest + assert mock_fit.call_count == 0 diff --git a/test/test_ensemble_builder/test_manager.py b/test/test_ensemble_builder/test_manager.py index e3078c1344..7a187dce4f 100644 --- a/test/test_ensemble_builder/test_manager.py +++ b/test/test_ensemble_builder/test_manager.py @@ -6,7 +6,6 @@ from autosklearn.ensemble_building import EnsembleBuilderManager from pytest_cases import parametrize_with_cases -from unittest.mock import MagicMock, patch import test.test_automl.cases as cases from test.conftest import DEFAULT_SEED diff --git a/test/test_estimators/test_estimators.py b/test/test_estimators/test_estimators.py index 730647f948..44fc7e2d2a 100644 --- a/test/test_estimators/test_estimators.py +++ b/test/test_estimators/test_estimators.py @@ -92,13 +92,18 @@ def __call__(self, *args, **kwargs): assert getattr(get_smac_object_wrapper_instance, "dask_n_jobs") == 2 assert getattr(get_smac_object_wrapper_instance, "dask_client_n_jobs") == 2 + # DEBUG + print(os.listdir(automl.automl_._backend.get_runs_directory())) + available_num_runs = set() + print(automl.automl_.runhistory_.data) for run_key, run_value in automl.automl_.runhistory_.data.items(): if ( run_value.additional_info is not None and "num_run" in run_value.additional_info ): available_num_runs.add(run_value.additional_info["num_run"]) + available_predictions = set() predictions = glob.glob( os.path.join( @@ -110,15 +115,15 @@ def __call__(self, *args, **kwargs): seeds = set() for prediction in predictions: prediction = os.path.split(prediction)[1] - match = re.match( - Run.RE_MODEL_DIR, prediction.replace("predictions_ensemble", "") - ) + match = re.match(Run.RE_MODEL_PREDICTION_FILE, prediction) if match: - num_run = int(match.group(2)) - available_predictions.add(num_run) - seed = int(match.group(1)) - seeds.add(seed) + seed, num_run, _ = match.groups() + available_predictions.add(int(num_run)) + seeds.add(int(seed)) + print(predictions) + print(available_predictions) + print(available_num_runs) # Remove the dummy prediction, it is not part of the runhistory available_predictions.remove(1) assert available_num_runs.issubset(available_predictions) From cb723b917c8d7c90b466fc311f3c5b01622a8ada Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Thu, 5 May 2022 18:05:43 +0200 Subject: [PATCH 098/117] Make automl caching robust to `pytest-xdist` --- pyproject.toml | 2 +- test/fixtures/caching.py | 24 ++++++++ test/test_automl/cases.py | 126 +++++++++++++++++++------------------- 3 files changed, 89 insertions(+), 63 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 161ea504d3..5a131b5ec9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ [tool.pytest.ini_options] testpaths = ["test"] minversion = "3.7" -#addopts = "--cov=autosklearn" +addopts = "--forked" [tool.coverage.run] branch = true diff --git a/test/fixtures/caching.py b/test/fixtures/caching.py index 784b489af8..7453c8994d 100644 --- a/test/fixtures/caching.py +++ b/test/fixtures/caching.py @@ -2,14 +2,20 @@ from typing import Any, Callable +import os import pickle import shutil +import tempfile from functools 
import partial from pathlib import Path +from filelock import FileLock + from pytest import FixtureRequest from pytest_cases import fixture +LOCK_DIR = Path(tempfile.gettempdir()) + class Cache: """Used for the below fixtures. @@ -68,8 +74,10 @@ def __init__(self, key: str, cache_dir: Path, verbose: int = 0): verbose : int = 0 Whether to be verbose or not. Currently only has one level (> 0) """ + self.key = key self.dir = cache_dir / key self.verbose = verbose > 0 + self._lock: FileLock = None def items(self) -> list[Path]: """Get any paths associated to items in this dir""" @@ -103,6 +111,22 @@ def reset(self) -> None: shutil.rmtree(self.dir) self.dir.mkdir() + def __enter__(self): + if int(os.environ.get("PYTEST_XDIST_WORKER_COUNT", 1)) <= 1: + return self + else: + path = LOCK_DIR / f"{self.key}.lock" + self._lock = FileLock(path) + self._lock.acquire(poll_interval=1.0) + if self.verbose: + print(f"locked cache {path}") + + return self + + def __exit__(self, *args, **kwargs): + if self._lock is not None: + self._lock.release() + @fixture def make_cache(request: FixtureRequest) -> Callable[[str], Cache]: diff --git a/test/test_automl/cases.py b/test/test_automl/cases.py index 79b7402bbf..a6779d1b08 100644 --- a/test/test_automl/cases.py +++ b/test/test_automl/cases.py @@ -71,25 +71,25 @@ def case_classifier_fitted_holdout_iterative( resampling_strategy = "holdout-iterative-fit" key = f"case_classifier_{resampling_strategy}_{dataset}" - cache = make_cache(key) - if "model" not in cache: - # Make the model in the cache - model = make_automl_classifier( - temporary_directory=cache.path("backend"), - delete_tmp_folder_after_terminate=False, - resampling_strategy=resampling_strategy, - ) + # This locks the cache for this item while we check, required for pytest-xdist + with make_cache(key) as cache: + if "model" not in cache: + # Make the model in the cache + model = make_automl_classifier( + temporary_directory=cache.path("backend"), + delete_tmp_folder_after_terminate=False, + resampling_strategy=resampling_strategy, + ) - X, y, Xt, yt = make_sklearn_dataset(name=dataset) - model.fit(X, y, dataset_name=dataset) + X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model.fit(X, y, dataset_name=dataset) - # Save the model - cache.save(model, "model") + # Save the model + cache.save(model, "model") # Try the model from the cache model = cache.load("model") - assert model is not None model._backend = copy_backend(old=model._backend, new=make_backend()) return model @@ -108,23 +108,25 @@ def case_classifier_fitted_cv( resampling_strategy = "cv" key = f"case_classifier_{resampling_strategy}_{dataset}" - cache = make_cache(key) - if "model" not in cache: - model = make_automl_classifier( - resampling_strategy=resampling_strategy, - temporary_directory=cache.path("backend"), - delete_tmp_folder_after_terminate=False, - ) + # This locks the cache for this item while we check, required for pytest-xdist + with make_cache(key) as cache: + if "model" not in cache: + model = make_automl_classifier( + resampling_strategy=resampling_strategy, + temporary_directory=cache.path("backend"), + delete_tmp_folder_after_terminate=False, + time_left_for_this_task=60, # Give some more for CV + per_run_time_limit=10, + ) - X, y, Xt, yt = make_sklearn_dataset(name=dataset) - model.fit(X, y, dataset_name=dataset) + X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model.fit(X, y, dataset_name=dataset) - cache.save(model, "model") + cache.save(model, "model") # Try the model from the cache model = cache.load("model") 
- assert model is not None model._backend = copy_backend(old=model._backend, new=make_backend()) return model @@ -139,28 +141,27 @@ def case_regressor_fitted_holdout( make_automl_regressor: Callable[..., AutoMLRegressor], make_sklearn_dataset: Callable[..., Tuple[np.ndarray, ...]], ) -> AutoMLRegressor: - """Case of fitted regressor with cv resampling""" + """Case of fitted regressor with holdout""" resampling_strategy = "holdout" key = f"case_regressor_{resampling_strategy}_{dataset}" - cache = make_cache(key) - if "model" not in cache: - model = make_automl_regressor( - temporary_directory=cache.path("backend"), - resampling_strategy=resampling_strategy, - delete_tmp_folder_after_terminate=False, - ) + # This locks the cache for this item while we check, required for pytest-xdist + with make_cache(key) as cache: + if "model" not in cache: + model = make_automl_regressor( + temporary_directory=cache.path("backend"), + resampling_strategy=resampling_strategy, + delete_tmp_folder_after_terminate=False, + ) - X, y, Xt, yt = make_sklearn_dataset(name=dataset) - model.fit(X, y, dataset_name=dataset) + X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model.fit(X, y, dataset_name=dataset) - cache.save(model, "model") + cache.save(model, "model") # Try the model from the cache model = cache.load("model") - assert model is not None - model._backend = copy_backend(old=model._backend, new=make_backend()) return model @@ -177,26 +178,27 @@ def case_regressor_fitted_cv( ) -> AutoMLRegressor: """Case of fitted regressor with cv resampling""" resampling_strategy = "cv" - key = f"case_regressor_{resampling_strategy}_{dataset}" - cache = make_cache(key) - if "model" not in cache: - model = make_automl_regressor( - temporary_directory=cache.path("backend"), - resampling_strategy=resampling_strategy, - delete_tmp_folder_after_terminate=False, - ) + # This locks the cache for this item while we check, required for pytest-xdist + with make_cache(key) as cache: + + if "model" not in cache: + model = make_automl_regressor( + temporary_directory=cache.path("backend"), + resampling_strategy=resampling_strategy, + delete_tmp_folder_after_terminate=False, + time_left_for_this_task=60, # Some extra time for CV + per_run_time_limit=10, + ) - X, y, Xt, yt = make_sklearn_dataset(name=dataset) - model.fit(X, y, dataset_name=dataset) + X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model.fit(X, y, dataset_name=dataset) - cache.save(model, "model") + cache.save(model, "model") # Try the model from the cache model = cache.load("model") - assert model is not None - model._backend = copy_backend(old=model._backend, new=make_backend()) return model @@ -213,23 +215,23 @@ def case_classifier_fitted_no_ensemble( ) -> AutoMLClassifier: """Case of a fitted classifier but enemble_size was set to 0""" key = f"case_classifier_fitted_no_ensemble_{dataset}" - cache = make_cache(key) - if "model" not in cache: - model = make_automl_classifier( - temporary_directory=cache.path("backend"), - delete_tmp_folder_after_terminate=False, - ensemble_size=0, - ) + # This locks the cache for this item while we check, required for pytest-xdist + with make_cache(key) as cache: - X, y, Xt, yt = make_sklearn_dataset(name=dataset) - model.fit(X, y, dataset_name=dataset) + if "model" not in cache: + model = make_automl_classifier( + temporary_directory=cache.path("backend"), + delete_tmp_folder_after_terminate=False, + ensemble_size=0, + ) - cache.save(model, "model") + X, y, Xt, yt = make_sklearn_dataset(name=dataset) + model.fit(X, y, 
dataset_name=dataset) - model = cache.load("model") - assert model is not None + cache.save(model, "model") + model = cache.load("model") model._backend = copy_backend(old=model._backend, new=make_backend()) return model From cb45a35d2dfbe8aef962fe070f7aa33b59dba181 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Thu, 5 May 2022 18:06:13 +0200 Subject: [PATCH 099/117] Test fixes --- autosklearn/automl.py | 2 +- autosklearn/ensemble_building/builder.py | 27 +++++++++-- .../test_ensemble_builder_real.py | 30 ++----------- test/test_ensemble_builder/test_manager.py | 10 ----- test/test_estimators/test_estimators.py | 45 +++++++++---------- test/test_util/test_trials_callback.py | 1 - 6 files changed, 50 insertions(+), 65 deletions(-) diff --git a/autosklearn/automl.py b/autosklearn/automl.py index d9eb60f8d4..cd1db57a05 100644 --- a/autosklearn/automl.py +++ b/autosklearn/automl.py @@ -1515,7 +1515,7 @@ def fit_ensemble( "Error building the ensemble - please check the log file and command " "line output for error messages." ) - self.ensemble_performance_history, _, _, _, _ = result + self.ensemble_performance_history, _ = result self._ensemble_size = ensemble_size self._load_models() diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 8b2cb9ef69..3b54787716 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -448,7 +448,13 @@ def main( candidates = [best] to_delete.remove(best) - self.delete_runs(to_delete) + if any(to_delete): + self.logger.info( + f"Deleting runs {to_delete} due to" + f" max_models={self.max_models_on_disc} and/or" + f" memory_limit={self.model_memory_limit}" + ) + self.delete_runs(to_delete) # If there are any candidates, perform candidates selection if any(candidates): @@ -459,7 +465,13 @@ def main( nbest=self.ensemble_nbest, performance_range_threshold=self.performance_range_threshold, ) - self.delete_runs(to_delete) + if any(to_delete): + self.logger.info( + f"Deleting runs {to_delete} due to" + f" nbest={self.ensemble_nbest} and/or" + f" performance_range_threshold={self.performance_range_threshold}" + ) + self.delete_runs(to_delete) else: candidates = dummies self.logger.warning("No real runs to build ensemble from") @@ -473,7 +485,15 @@ def main( candidates = sorted(test_subset, key=lambda r: r.id) test_models = candidates - self.delete_runs(candidates_set - test_subset) + to_delete = candidates_set - test_subset + if any(to_delete): + self.logger.info( + f"Deleting runs {to_delete} due to runs not" + ' having "test_predictions" while others do not:' + f"\nHave test_predictions = {test_subset}" + f"\nNo test_predictions = {to_delete}" + ) + self.delete_runs(candidates_set - test_subset) else: candidates = sorted(candidates_set, key=lambda r: r.id) @@ -901,7 +921,6 @@ def delete_runs(self, runs: Iterable[Run]) -> None: runs : Sequence[Run] The runs to delete """ - print("deleted", runs) items = iter(run for run in runs if not run.is_dummy() and run.dir.exists()) for run in items: try: diff --git a/test/test_ensemble_builder/test_ensemble_builder_real.py b/test/test_ensemble_builder/test_ensemble_builder_real.py index 9f44c83472..fdad0a45c9 100644 --- a/test/test_ensemble_builder/test_ensemble_builder_real.py +++ b/test/test_ensemble_builder/test_ensemble_builder_real.py @@ -8,7 +8,6 @@ from autosklearn.automl import AutoML from autosklearn.ensemble_building.builder import EnsembleBuilder -import pytest from pytest_cases import parametrize_with_cases from 
pytest_cases.filters import has_tag from unittest.mock import MagicMock, patch @@ -75,6 +74,10 @@ def test_run_builds_valid_ensemble(builder: EnsembleBuilder) -> None: candidate_ids = {run.id for run in candidates} assert ensemble_ids <= candidate_ids + # Could be the case no run is deleted + if not mock_delete.called: + return + args, _ = mock_delete.call_args deleted = args[0] # `delete_runs(runs)` @@ -93,28 +96,3 @@ def test_run_builds_valid_ensemble(builder: EnsembleBuilder) -> None: a = (worst_candidate.loss, worst_candidate.num_run) b = (best_deleted.loss, best_deleted.num_run) assert a <= b - - -@parametrize_with_cases("builder", cases=case_real_runs) -def test_does_not_update_ensemble_with_no_new_runs(builder: EnsembleBuilder) -> None: - """ - Expects - ------- - * No new ensemble should be fitted with no new runs and no runs updated. - Since this is from a real AutoML run, running the builder again without having - trained any new models should mean that the `fit_ensemble` is never run in the - EnsembleBuilder. - """ - if not builder.previous_candidates_path.exists(): - pytest.skip("Test only valid when builder has previous candidates") - - prev_history = builder.ensemble_history - prev_nbest = builder.ensemble_nbest - - # So we can wrap and test if fit ensemble gets called - with patch.object(builder, "fit_ensemble", wraps=builder.fit_ensemble) as mock_fit: - history, nbest = builder.main() - - assert history == prev_history - assert prev_nbest == nbest - assert mock_fit.call_count == 0 diff --git a/test/test_ensemble_builder/test_manager.py b/test/test_ensemble_builder/test_manager.py index 7a187dce4f..077a2ae71c 100644 --- a/test/test_ensemble_builder/test_manager.py +++ b/test/test_ensemble_builder/test_manager.py @@ -27,13 +27,3 @@ def case_real_runs( random_state=DEFAULT_SEED, ) return manager - - -@parametrize_with_cases("manager", cases=case_real_runs) -def test_run_builds_valid_ensemble(manager: EnsembleBuilderManager) -> None: - ... - - -@parametrize_with_cases("builder", cases=case_real_runs) -def test_main(builder: EnsembleBuilderManager) -> None: - ... 
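
For reference, the cache locking added to the test fixtures above reduces to one idea: take a per-key file lock, stored in the system temp directory so every pytest-xdist worker agrees on it, and only bother when more than one worker is running. The following is a minimal, standalone sketch of that pattern, not the repository's actual `Cache` fixture; `LockedCache` and the usage shown are illustrative assumptions.

from __future__ import annotations

import os
import tempfile
from pathlib import Path

from filelock import FileLock


class LockedCache:
    """Standalone sketch of the per-key file-lock guard used for pytest-xdist."""

    def __init__(self, key: str, cache_dir: Path) -> None:
        self.key = key
        self.dir = cache_dir / key
        self._lock: FileLock | None = None

    def __enter__(self) -> LockedCache:
        # Only lock when more than one xdist worker could race on this cache key
        if int(os.environ.get("PYTEST_XDIST_WORKER_COUNT", 1)) > 1:
            lock_path = Path(tempfile.gettempdir()) / f"{self.key}.lock"
            self._lock = FileLock(lock_path)
            self._lock.acquire(poll_interval=1.0)
        return self

    def __exit__(self, *exc_info) -> None:
        if self._lock is not None:
            self._lock.release()


# Hypothetical usage, mirroring the case fixtures above:
#
#   with LockedCache("case_classifier_cv_iris", cache_root) as cache:
#       if not (cache.dir / "model").exists():
#           ...  # the first worker to acquire the lock builds and saves the model
#       ...      # the others wait on the lock, then load the cached model

Because the lock file lives in tempfile.gettempdir() rather than under a per-worker tmp_path, all workers resolve the same lock for a given cache key.
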
diff --git a/test/test_estimators/test_estimators.py b/test/test_estimators/test_estimators.py index 44fc7e2d2a..42623c522a 100644 --- a/test/test_estimators/test_estimators.py +++ b/test/test_estimators/test_estimators.py @@ -59,6 +59,8 @@ def test_fit_n_jobs(tmp_dir): Y_train += 1 Y_test += 1 + n_jobs = 2 + class get_smac_object_wrapper: def __call__(self, *args, **kwargs): self.n_jobs = kwargs["n_jobs"] @@ -77,56 +79,53 @@ def __call__(self, *args, **kwargs): per_run_time_limit=5, tmp_folder=os.path.join(tmp_dir, "backend"), seed=1, - initial_configurations_via_metalearning=0, - ensemble_size=5, - n_jobs=2, - include={"classifier": ["sgd"], "feature_preprocessor": ["no_preprocessing"]}, + n_jobs=n_jobs, get_smac_object_callback=get_smac_object_wrapper_instance, max_models_on_disc=None, ) - automl.fit(X_train, Y_train) # Test that the argument is correctly passed to SMAC - assert getattr(get_smac_object_wrapper_instance, "n_jobs") == 2 - assert getattr(get_smac_object_wrapper_instance, "dask_n_jobs") == 2 - assert getattr(get_smac_object_wrapper_instance, "dask_client_n_jobs") == 2 + assert get_smac_object_wrapper_instance.n_jobs == n_jobs + assert get_smac_object_wrapper_instance.dask_n_jobs == n_jobs + assert get_smac_object_wrapper_instance.dask_client_n_jobs == n_jobs - # DEBUG - print(os.listdir(automl.automl_._backend.get_runs_directory())) + runhistory_data = automl.automl_.runhistory_.data - available_num_runs = set() - print(automl.automl_.runhistory_.data) - for run_key, run_value in automl.automl_.runhistory_.data.items(): + successful_runs = { + run_value.additional_info["num_run"] + for run_value in runhistory_data.values() if ( run_value.additional_info is not None and "num_run" in run_value.additional_info - ): - available_num_runs.add(run_value.additional_info["num_run"]) + and run_value.status == StatusType.SUCCESS + ) + } available_predictions = set() predictions = glob.glob( os.path.join( - automl.automl_._backend.get_runs_directory(), - "*", - "predictions_ensemble*.npy", + automl.automl_._backend.get_runs_directory(), "*", "predictions_ensemble_*" ) ) seeds = set() + print("predictions", predictions) for prediction in predictions: prediction = os.path.split(prediction)[1] match = re.match(Run.RE_MODEL_PREDICTION_FILE, prediction) if match: + print(match) seed, num_run, _ = match.groups() available_predictions.add(int(num_run)) seeds.add(int(seed)) - print(predictions) - print(available_predictions) - print(available_num_runs) # Remove the dummy prediction, it is not part of the runhistory - available_predictions.remove(1) - assert available_num_runs.issubset(available_predictions) + if 1 in available_predictions: + available_predictions.remove(1) + + # Make sure all predictions available are associated with a successful run + # Don't want a rogue prediction file + assert available_predictions <= successful_runs assert len(seeds) == 1 diff --git a/test/test_util/test_trials_callback.py b/test/test_util/test_trials_callback.py index b1328b9489..e20f0abc40 100644 --- a/test/test_util/test_trials_callback.py +++ b/test/test_util/test_trials_callback.py @@ -62,7 +62,6 @@ def test_trials_callback_execution(self): time_left_for_this_task=30, initial_configurations_via_metalearning=0, per_run_time_limit=10, - memory_limit=1024, delete_tmp_folder_after_terminate=False, n_jobs=1, include={"feature_preprocessor": ["pca"], "classifier": ["sgd"]}, From cfd45f63d119537e87d24371c38a976e26a03478 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 6 May 2022 11:58:48 +0200 
Subject: [PATCH 100/117] Extend interval for test on run caching --- test/test_ensemble_builder/test_run.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/test/test_ensemble_builder/test_run.py b/test/test_ensemble_builder/test_run.py index e3227d67ff..d57a581106 100644 --- a/test/test_ensemble_builder/test_run.py +++ b/test/test_ensemble_builder/test_run.py @@ -107,20 +107,25 @@ def test_caching(make_run: Callable[..., Run]) -> None: ------- * Attempting to load the same predictions again will cause the result to be cached * Unloading the cache will cause it to reload and reread the predictions + + Note + ---- + The `time.sleep` here is to give some time between accesses. Using a value of + `0.01` seemed to be too low for the github action runners """ run = make_run() path = run.pred_path() before_access = path.stat().st_atime_ns - time.sleep(0.01) + time.sleep(1) _ = run.predictions() # Should cache result load_access = path.stat().st_atime_ns # We test that it was not loaded from disk by checking when it was last accessed assert before_access != load_access - time.sleep(0.01) + time.sleep(1) _ = run.predictions() # Should use cache result cache_access = path.stat().st_atime_ns @@ -128,7 +133,7 @@ def test_caching(make_run: Callable[..., Run]) -> None: run.unload_cache() - time.sleep(0.01) + time.sleep(1) _ = run.predictions() # Should have reloaded it reloaded_access = path.stat().st_atime_ns From cc45300edc54b9adfd89af40ce7c711445c81e19 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 6 May 2022 14:00:16 +0200 Subject: [PATCH 101/117] Use pickle for reseting cache --- autosklearn/ensemble_building/builder.py | 5 ----- autosklearn/ensemble_building/run.py | 15 +++++++++------ test/test_ensemble_builder/test_run.py | 5 +++-- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 3b54787716..fe9d1243ec 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -499,11 +499,6 @@ def main( candidates = sorted(candidates_set, key=lambda r: r.id) test_models = [] - # To save on pickle and to allow for fresh predictions, unload the cache - # before pickling - for run in candidates: - run.unload_cache() - # Save the candidates for the next round with self.previous_candidates_path.open("wb") as f: pickle.dump({run.id: run for run in candidates}, f) diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index 6b30dcfb6b..9293fbe504 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -140,12 +140,15 @@ def predictions( self._cache[key] = predictions return predictions - def unload_cache(self) -> None: - """Removes the cache from this object - - We could also enforce that nothing gets pickled to disk with __getstate__ - but this is simpler and shows expliciyt behaviour in caller code. 
- """ + def __getstate__(self) -> dict: + """Remove the cache when pickling.""" + state = self.__dict__.copy() + del state["_cache"] + return state + + def __setstate__(self, state: dict) -> None: + """Reset state and instansiate blank cache.""" + self.__dict__.update(state) self._cache = {} @property diff --git a/test/test_ensemble_builder/test_run.py b/test/test_ensemble_builder/test_run.py index d57a581106..899b477a61 100644 --- a/test/test_ensemble_builder/test_run.py +++ b/test/test_ensemble_builder/test_run.py @@ -131,10 +131,11 @@ def test_caching(make_run: Callable[..., Run]) -> None: assert cache_access == load_access - run.unload_cache() + pickled_run = pickle.dumps(run) + unpickled_run = pickle.loads(pickled_run) time.sleep(1) - _ = run.predictions() # Should have reloaded it + _ = unpickled_run.predictions() # Should have reloaded it reloaded_access = path.stat().st_atime_ns assert reloaded_access != cache_access From 65ec881680a1db276207c88cc01e2fe2ec95d229 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 6 May 2022 16:41:41 +0200 Subject: [PATCH 102/117] Fix test for caching mechanism to not rely on `stat` --- test/test_ensemble_builder/test_run.py | 34 +++++++++----------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/test/test_ensemble_builder/test_run.py b/test/test_ensemble_builder/test_run.py index 899b477a61..5c494208ca 100644 --- a/test/test_ensemble_builder/test_run.py +++ b/test/test_ensemble_builder/test_run.py @@ -107,38 +107,28 @@ def test_caching(make_run: Callable[..., Run]) -> None: ------- * Attempting to load the same predictions again will cause the result to be cached * Unloading the cache will cause it to reload and reread the predictions - - Note - ---- - The `time.sleep` here is to give some time between accesses. 
Using a value of - `0.01` seemed to be too low for the github action runners """ run = make_run() - path = run.pred_path() - before_access = path.stat().st_atime_ns - - time.sleep(1) - _ = run.predictions() # Should cache result - load_access = path.stat().st_atime_ns - - # We test that it was not loaded from disk by checking when it was last accessed - assert before_access != load_access + assert len(run._cache) == 0 + first_load = run.predictions() # Should cache result + assert len(run._cache) == 1 - time.sleep(1) - _ = run.predictions() # Should use cache result - cache_access = path.stat().st_atime_ns + cache_load = run.predictions() # Should use cache result + assert len(run._cache) == 1 - assert cache_access == load_access + # The should be the same object + assert id(first_load) == id(cache_load) pickled_run = pickle.dumps(run) unpickled_run = pickle.loads(pickled_run) - time.sleep(1) - _ = unpickled_run.predictions() # Should have reloaded it - reloaded_access = path.stat().st_atime_ns + assert len(unpickled_run._cache) == 0 + unpickled_load = unpickled_run.predictions() # Should have reloaded it + assert len(unpickled_run._cache) == 1 - assert reloaded_access != cache_access + # Should not be the same object as before once pickled + assert id(unpickled_load) != id(first_load) def test_equality(make_run: Callable[..., Run]) -> None: From 3c218e4d770ab38afdca25f1ff0495fb86aae849 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 12:44:15 +0200 Subject: [PATCH 103/117] Move run deletion to the end of the builder `main` --- autosklearn/ensemble_building/builder.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index fe9d1243ec..b12b82cb46 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -407,6 +407,9 @@ def main( self.logger.debug(f"No targets for ensemble: {traceback.format_exc()}") raise RuntimeError("No targets for ensemble") + # We will delete runs once we are complete + deletable_runs: set[Run] = set() + # Load in information from previous candidates and also runs available_runs = self.available_runs() @@ -454,7 +457,7 @@ def main( f" max_models={self.max_models_on_disc} and/or" f" memory_limit={self.model_memory_limit}" ) - self.delete_runs(to_delete) + deletable_runs.update(to_delete) # If there are any candidates, perform candidates selection if any(candidates): @@ -471,7 +474,7 @@ def main( f" nbest={self.ensemble_nbest} and/or" f" performance_range_threshold={self.performance_range_threshold}" ) - self.delete_runs(to_delete) + deletable_runs.update(to_delete) else: candidates = dummies self.logger.warning("No real runs to build ensemble from") @@ -493,7 +496,7 @@ def main( f"\nHave test_predictions = {test_subset}" f"\nNo test_predictions = {to_delete}" ) - self.delete_runs(candidates_set - test_subset) + deletable_runs.update(to_delete) else: candidates = sorted(candidates_set, key=lambda r: r.id) @@ -573,6 +576,13 @@ def main( performance_stamp[f"ensemble_{score_name}_score"] = score self.ensemble_history.append(performance_stamp) + # Lastly, delete any runs that need to be deleted. We save this as the last step + # so that we have an ensemble saved that is up to date. 
If we do not do so, + # there could be runs deleted that are in th previous ensemble and we do not + # manage to update the ensemble due to a crash or the process being killed + # before it could be updated + self.delete_runs(deletable_runs) + return self.ensemble_history, self.ensemble_nbest def requires_loss_update( From 0fc809ed71c1c04f72bc7c1ac7e69c2683853c0a Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 15:35:23 +0200 Subject: [PATCH 104/117] Remove `getattr` version of tae.client --- autosklearn/ensemble_building/manager.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index d79f2bcd80..a438b6995d 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -155,8 +155,7 @@ def __call__( """ if result.status in (StatusType.STOP, StatusType.ABORT) or smbo._stop: return - client = getattr(smbo.tae_runner, "client") - self.build_ensemble(client) + self.build_ensemble(smbo.tae_runner.client) def build_ensemble( self, From b175bb09e90db82edce16abb0fea12bb84eb896b Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 15:35:51 +0200 Subject: [PATCH 105/117] Remove `normalize` --- autosklearn/util/functional.py | 53 ---------------------------------- test/fixtures/datasets.py | 7 ++--- 2 files changed, 3 insertions(+), 57 deletions(-) diff --git a/autosklearn/util/functional.py b/autosklearn/util/functional.py index 043c578f09..e48f018305 100644 --- a/autosklearn/util/functional.py +++ b/autosklearn/util/functional.py @@ -5,62 +5,9 @@ from functools import reduce from itertools import chain, tee -import numpy as np - T = TypeVar("T") -def normalize(x: np.ndarray, axis: int | None = None) -> np.ndarray: - """Normalizes an array along an axis - - Note - ---- - TODO: Only works for positive numbers - - ..code:: python - - x = np.ndarray([ - [1, 1, 1], - [2, 2, 2], - [7, 7, 7], - ]) - - print(normalize(x, axis=0)) - - np.ndarray([ - [.1, .1, .1] - [.2, .2, .2] - [.7, .7, .7] - ]) - - print(normalize(x, axis=1)) - - np.ndarray([ - [.333, .333, .333] - [.333, .333, .333] - [.333, .333, .333] - ]) - - Note - ---- - Does not account for 0 sums along an axis - - Parameters - ---------- - x : np.ndarray - The array to normalize - - axis : Optional[int] = None - The axis to normalize across - - Returns - ------- - np.ndarray - The normalized array - """ - return x / x.sum(axis=axis, keepdims=True) - - def intersection(*items: Iterable[T]) -> set[T]: """Does an intersection over all collection of items diff --git a/test/fixtures/datasets.py b/test/fixtures/datasets.py index d79a228c23..569241ac51 100644 --- a/test/fixtures/datasets.py +++ b/test/fixtures/datasets.py @@ -17,7 +17,6 @@ from autosklearn.data.validation import SUPPORTED_FEAT_TYPES, SUPPORTED_TARGET_TYPES from autosklearn.data.xy_data_manager import XYDataManager from autosklearn.pipeline.util import get_dataset -from autosklearn.util.functional import normalize from pytest import fixture @@ -134,7 +133,7 @@ def _make_binary_data( weights = np.ones_like(classes) / len(classes) assert len(weights) == len(classes) - weights = normalize(np.asarray(weights)) + weights = weights / np.sum(weights, keepdims=True) X = rs.rand(*dims) y = rs.choice([0, 1], dims[0], p=weights) @@ -163,7 +162,7 @@ def _make_multiclass_data( weights = np.ones_like(classes) / len(classes) assert len(weights) == len(classes) - weights = normalize(np.asarray(weights)) + weights = weights / 
np.sum(weights, keepdims=True) X = rs.rand(*dims) y = rs.choice(classes, dims[0], p=weights) @@ -194,7 +193,7 @@ def _make_multilabel_data( weights = np.ones(classes.shape[0]) / len(classes) assert len(weights) == len(classes) - weights = normalize(np.asarray(weights)) + weights = weights / np.sum(weights, keepdims=True) X = rs.rand(*dims) From 2bd0c010a85f48156709bb400e546813b0b4f443 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 15:37:08 +0200 Subject: [PATCH 106/117] Extend not for `Run` --- autosklearn/ensemble_building/run.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index 9293fbe504..a2a0c10a4a 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -13,7 +13,12 @@ class Run: - """Class for storing information about a run used during ensemble building""" + """Class for storing information about a run used during ensemble building. + + Note + ---- + This is for internal use by the EnsembleBuilder and not for general usage. + """ # For matching prediction files RE_MODEL_PREDICTION_FILE = ( From 25defe8127d0f61628e077ff30e1ab6b829eb026 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 15:37:43 +0200 Subject: [PATCH 107/117] Fix `__init__` of `Run` --- autosklearn/ensemble_building/run.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/autosklearn/ensemble_building/run.py b/autosklearn/ensemble_building/run.py index a2a0c10a4a..fa73f91e45 100644 --- a/autosklearn/ensemble_building/run.py +++ b/autosklearn/ensemble_building/run.py @@ -29,17 +29,12 @@ class Run: RE_MODEL_DIR = r"^([0-9]*)_([0-9]*)_([0-9]{1,3}\.[0-9]*)$" def __init__(self, path: Path) -> None: - """Creates a Run from a path point to the directory of a run + """Creates a Run from a path pointing to the directory of a run Parameters ---------- path: Path Expects something like /path/to/{seed}_{numrun}_{budget} - - Returns - ------- - Run - The run object generated from the directory """ name = path.name seed, num_run, budget = name.split("_") From 82c68f0a7ace6ba53cad6827b63d8dc9fc732f94 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 15:49:45 +0200 Subject: [PATCH 108/117] Parameter and comment fixes from feedback --- autosklearn/ensemble_building/builder.py | 21 ++++++++++------- autosklearn/ensemble_building/manager.py | 30 ++++++++++++------------ 2 files changed, 27 insertions(+), 24 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index b12b82cb46..775c1f3ea1 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -43,8 +43,8 @@ def __init__( dataset_name: str, task_type: int, metric: Scorer, - ensemble_size: int = 10, - ensemble_nbest: int | float = 100, + ensemble_size: int = 50, + ensemble_nbest: int | float = 50, max_models_on_disc: int | float | None = 100, performance_range_threshold: float = 0, seed: int = 1, @@ -69,10 +69,10 @@ def __init__( metric: str name of metric to compute the loss of the given predictions - ensemble_size: int = 10 + ensemble_size: int = 50 maximal size of ensemble (passed to autosklearn.ensemble.ensemble_selection) - ensemble_nbest: int | float = 100 + ensemble_nbest: int | float = 50 * int: consider only the n best prediction (> 0) @@ -114,8 +114,10 @@ def __init__( memory_limit: int | None = 1024 memory limit in mb. If ``None``, no memory limit is enforced. 
- read_at_most: int | None = 5 - read at most n new prediction files in each iteration + read_at_most: int | None = None + read at most n new prediction files in each iteration. If `None`, will read + the predictions and calculate losses for all runs that require it. + logger_port: int = DEFAULT_TCP_LOGGING_PORT port that receives logging records @@ -477,10 +479,11 @@ def main( deletable_runs.update(to_delete) else: candidates = dummies - self.logger.warning("No real runs to build ensemble from") + self.logger.warning("No runs were available to build an ensemble from") - # If there's an intersect with models that have some predictions on the - # test subset, use that subset, otherwise use all of the candidates + # In case we record test predictions and not every model has test predictions, + # only use the subset of models that has predictions for both the test set and + # the ensemble optimization set. candidates_set = set(candidates) test_subset = {r for r in candidates if r.pred_path("test").exists()} diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index a438b6995d..ac3fd5c8de 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -29,8 +29,8 @@ def __init__( time_left_for_ensembles: float = 10, max_iterations: int | None = None, pynisher_context: str = "fork", - ensemble_size: int = 10, - ensemble_nbest: int | float = 100, + ensemble_size: int = 50, + ensemble_nbest: int | float = 50, max_models_on_disc: int | float | None = None, seed: int = 1, precision: int = 32, @@ -67,13 +67,12 @@ def __init__( pynisher_context: "spawn" | "fork" | "forkserver" = "fork" The multiprocessing context for pynisher. - ensemble_size: int = 10 + ensemble_size: int = 50 maximal size of ensemble - ensemble_nbest: int | float = 100 + ensemble_nbest: int | float = 50 If int: consider only the n best prediction If float: consider only this fraction of the best models - If performance_range_threshold > 0, might return less models max_models_on_disc: int | float | None = None Defines the maximum number of models that are kept in the disc. @@ -99,8 +98,9 @@ def __init__( memory_limit: int | None = None Memory limit in mb. If ``None``, no memory limit is enforced. - read_at_most: int | None = 5 - Read at most n new prediction files in each iteration + read_at_most: int | None = None + read at most n new prediction files in each iteration. If `None`, will read + the predictions and calculate losses for all runs that require it. logger_port: int = DEFAULT_TCP_LOGGING_PORT Port that receives logging records @@ -265,13 +265,13 @@ def fit_and_return_ensemble( task_type: int, metric: Scorer, pynisher_context: str, - ensemble_size: int = 10, - ensemble_nbest: int | float = 100, + ensemble_size: int = 50, + ensemble_nbest: int | float = 50, max_models_on_disc: int | float | None = None, seed: int = 1, precision: int = 32, memory_limit: int | None = None, - read_at_most: int | None = 5, + read_at_most: int | None = None, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, random_state: int | np.random.RandomState | None = None, ) -> tuple[list[dict[str, Any]], int | float]: @@ -303,13 +303,12 @@ def fit_and_return_ensemble( pynisher_context: "fork" | "spawn" | "forkserver" = "fork" Context to use for multiprocessing, can be either fork, spawn or forkserver. 
- ensemble_size: int = 10 + ensemble_size: int = 50 Maximal size of ensemble - ensemble_nbest: int | float = 1000 + ensemble_nbest: int | float = 50 If int: consider only the n best prediction If float: consider only this fraction of the best models - If performance_range_threshold > 0, might return less models max_models_on_disc: int | float | None = 100 Defines the maximum number of models that are kept in the disc. @@ -334,8 +333,9 @@ def fit_and_return_ensemble( memory_limit: int | None = None Memory limit in mb. If ``None``, no memory limit is enforced. - read_at_most: int | None = 5 - Read at most n new prediction files in each iteration + read_at_most: int | None = None + read at most n new prediction files in each iteration. If `None`, will read + the predictions and calculate losses for all runs that require it. logger_port: int = DEFAULT_TCP_LOGGING_PORT The port where the logging server is listening to. From ef7848f3f1082a018a411d2a59e24c7636627622 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 15:57:18 +0200 Subject: [PATCH 109/117] Change to `min(...)` instead of `sorted(...)[0]` --- autosklearn/ensemble_building/builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 775c1f3ea1..cb36ae60e9 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -449,7 +449,7 @@ def main( # If there are no candidates left, we just keep the best one if not any(candidates): - best = sorted(to_delete, key=lambda r: (r.loss, r.num_run))[0] + best = min(to_delete, key=lambda r: (r.loss, r.num_run)) candidates = [best] to_delete.remove(best) From c990e602278994c780a0b30634eb8a501bcc4bee Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 19:03:00 +0200 Subject: [PATCH 110/117] Make default time `np.inf` --- autosklearn/ensemble_building/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autosklearn/ensemble_building/manager.py b/autosklearn/ensemble_building/manager.py index ac3fd5c8de..46ab291bc2 100644 --- a/autosklearn/ensemble_building/manager.py +++ b/autosklearn/ensemble_building/manager.py @@ -26,7 +26,7 @@ def __init__( dataset_name: str, task: int, metric: Scorer, - time_left_for_ensembles: float = 10, + time_left_for_ensembles: float = np.inf, max_iterations: int | None = None, pynisher_context: str = "fork", ensemble_size: int = 50, @@ -56,7 +56,7 @@ def __init__( metric: Scorer Metric to compute the loss of the given predictions - time_left_for_ensemble: float = 10 + time_left_for_ensemble: float = np.inf How much time is left for the task in seconds. 
Job should finish within this allocated time From 64768568d47626a8f9c64d23b21b0101219d25c3 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 19:03:39 +0200 Subject: [PATCH 111/117] Add test for safe deletion in builder --- autosklearn/ensemble_building/builder.py | 19 +---- test/fixtures/backend.py | 8 ++ .../test_ensemble_builder.py | 82 +++++++++++++++++++ 3 files changed, 94 insertions(+), 15 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index cb36ae60e9..75c7373acc 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -17,7 +17,6 @@ import pynisher from autosklearn.automl_common.common.utils.backend import Backend -from autosklearn.constants import BINARY_CLASSIFICATION from autosklearn.data.xy_data_manager import XYDataManager from autosklearn.ensemble_building.run import Run, RunID from autosklearn.ensembles.ensemble_selection import EnsembleSelection @@ -164,9 +163,9 @@ def __init__( self.model_memory_limit: float | None = None if isinstance(max_models_on_disc, int): - self.max_models_on_disc = self.max_models_on_disc - elif isinstance(self.max_models_on_disc, float): - self.model_memory_limit = self.max_models_on_disc + self.max_models_on_disc = max_models_on_disc + elif isinstance(max_models_on_disc, float): + self.model_memory_limit = max_models_on_disc # The starting time of the procedure self.start_time: float = 0.0 @@ -259,7 +258,7 @@ def run( How much time should be left for this run. Either this or `end_at` must be provided. - end_at : float | None = Non + end_at : float | None = None When this run should end. Either this or `time_left` must be provided. time_buffer : int = 5 @@ -559,16 +558,6 @@ def main( run_preds = [r.predictions(kind, precision=self.precision) for r in models] pred = ensemble.predict(run_preds) - # Pretty sure this whole step is uneeded but left over and afraid - # to touch - if self.task_type == BINARY_CLASSIFICATION: - pred = pred[:, 1] - - if pred.ndim == 1 or pred.shape[1] == 1: - pred = np.vstack( - ((1 - pred).reshape((1, -1)), pred.reshape((1, -1))) - ).transpose() - score = calculate_score( solution=pred_targets, prediction=pred, diff --git a/test/fixtures/backend.py b/test/fixtures/backend.py index 4770b1873e..2dfcc4d472 100644 --- a/test/fixtures/backend.py +++ b/test/fixtures/backend.py @@ -7,6 +7,7 @@ from pathlib import Path from autosklearn.automl_common.common.utils.backend import Backend, create +from autosklearn.data.xy_data_manager import XYDataManager from pytest import fixture @@ -97,7 +98,11 @@ def make_backend(tmp_path: Path) -> Callable[..., Backend]: def _make( path: str | Path | None = None, template: Path | Backend | None = None, + datamanager: XYDataManager | None = None, ) -> Backend: + if template is not None and datamanager is not None: + raise ValueError("Does not support template and datamanager") + if path is None: _path = Path(tmp_path) / "backend" elif isinstance(path, str): @@ -116,6 +121,9 @@ def _make( prefix="auto-sklearn", ) + if datamanager is not None: + backend.save_datamanager(datamanager) + return backend return _make diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 9a3e2a374f..9aa9c38899 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -8,6 +8,7 @@ import numpy as np +from autosklearn.automl_common.common.utils.backend import 
Backend from autosklearn.ensemble_building import EnsembleBuilder, Run from autosklearn.util.functional import bound, pairs @@ -15,6 +16,8 @@ from pytest_cases import fixture, parametrize from unittest.mock import patch +from test.conftest import DEFAULT_SEED + @fixture def builder(make_ensemble_builder: Callable[..., EnsembleBuilder]) -> EnsembleBuilder: @@ -552,3 +555,82 @@ def test_run_end_at(builder: EnsembleBuilder, time_buffer: int, duration: int) - # The 1 comes from the small overhead in conjuction with rounding down expected = duration - time_buffer - 1 assert pynisher_mock.call_args_list[0][1]["wall_time_in_s"] == expected + + +def test_deletion_will_not_break_current_ensemble( + make_backend: Callable[..., Backend], + make_ensemble_builder: Callable[..., EnsembleBuilder], + make_run: Callable[..., Run], +) -> None: + """ + Expects + ------- + * When running the builder, it's previous ensemble should not have it's runs deleted + until a new ensemble is built. + """ + # Make a builder with this backend and limit it to only allow 10 models on disc + builder = make_ensemble_builder( + max_models_on_disc=10, + seed=DEFAULT_SEED, + ) + + # Stick a dummy run and 10 bad runs into the backend + datamanager = builder.backend.load_datamanager() + targets = datamanager.data["Y_train"] + + bad_predictions = {"ensemble": np.zeros_like(targets)} + good_predictions = {"ensemble": targets} + + make_run(dummy=True, loss=10000, backend=builder.backend) + bad_runs = [ + make_run(backend=builder.backend, predictions=bad_predictions) + for _ in range(10) + ] + + ens_dir = Path(builder.backend.get_ensemble_dir()) + + # Make sure there's no ensemble and run with the candidates available + assert not ens_dir.exists() + builder.main(time_left=100) + + # Make sure an ensemble was built + assert ens_dir.exists() + first_builder_contents = set(ens_dir.iterdir()) + + # Create 10 new and better runs and put them in the backend + new_runs = [ + make_run(backend=builder.backend, predictions=good_predictions) + for _ in range(10) + ] + + # Now we make `save_ensemble` crash so that even though we run the builder, it does + # not manage to save the new ensemble + with patch.object(builder.backend, "save_ensemble", side_effect=ValueError): + try: + builder.main(time_left=100) + except Exception: + pass + + # Ensure that no new ensemble was created + second_builder_contents = set(ens_dir.iterdir()) + assert first_builder_contents == second_builder_contents + + # Now we make sure that the ensemble there still has access to all the bad models + # that it contained from the first run, even though the second crashed. + available_runs = builder.available_runs().values() + for run in bad_runs + new_runs: + assert run in available_runs + + # As a sanity check, run the builder one more time without crashing and make + # sure the bad runs are removed with the good ones kept. 
+ # We remove its previous candidates so that it won't remember previous candidates + # and will fit a new ensemble + builder.previous_candidates_path.unlink() + builder.main(time_left=100) + available_runs = builder.available_runs().values() + + for run in bad_runs: + assert run not in available_runs + + for run in new_runs: + assert run in available_runs From 936fba5db25ae863297dcc31f758e72c504a59af Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 19:05:52 +0200 Subject: [PATCH 112/117] Update docstring of `loss` for a run --- autosklearn/ensemble_building/builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index 75c7373acc..b364e1848f 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -877,7 +877,7 @@ def requires_deletion( return keep, delete def loss(self, run: Run, kind: str = "ensemble") -> float: - """Calculate the loss for a list of runs + """Calculate the loss for a run Parameters ---------- From 869504916f2f5f687f5a7a0e8bbdb3a2f756409e Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 19:27:53 +0200 Subject: [PATCH 113/117] Remove stray print --- test/test_estimators/test_estimators.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test_estimators/test_estimators.py b/test/test_estimators/test_estimators.py index 42623c522a..338c7170c2 100644 --- a/test/test_estimators/test_estimators.py +++ b/test/test_estimators/test_estimators.py @@ -109,7 +109,6 @@ def __call__(self, *args, **kwargs): ) ) seeds = set() - print("predictions", predictions) for prediction in predictions: prediction = os.path.split(prediction)[1] match = re.match(Run.RE_MODEL_PREDICTION_FILE, prediction) From c2111c2d7b3a7c1db7b3c214ee14fa23377067e3 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Wed, 11 May 2022 19:28:18 +0200 Subject: [PATCH 114/117] Minor feedback fixes --- autosklearn/ensemble_building/builder.py | 5 ++--- test/test_ensemble_builder/test_ensemble_builder.py | 2 ++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/autosklearn/ensemble_building/builder.py b/autosklearn/ensemble_building/builder.py index b364e1848f..4df53ad91f 100644 --- a/autosklearn/ensemble_building/builder.py +++ b/autosklearn/ensemble_building/builder.py @@ -540,7 +540,6 @@ def main( ensemble=ensemble, idx=iteration, seed=self.seed # type: ignore ) - # Continue with evaluating the ensemble after making some space performance_stamp = {"Timestamp": pd.Timestamp.now()} for kind, score_name, models in [ @@ -717,7 +716,7 @@ def candidate_selection( # Always preserve at least one, the best if len(candidates) == 0: candidates, discared = cut(discarded, 1) - self.logger.warning("nbest too aggresive, using best") + self.logger.warning("nbest too aggresive, using single best") all_discarded.update(discarded) @@ -733,7 +732,7 @@ def candidate_selection( # Always preserve at least one, the best if len(candidates) == 0: candidates, discared = cut(discarded, 1) - self.logger.warning("No models in performance range, using best") + self.logger.warning("No models in performance range, using single best") all_discarded.update(discarded) diff --git a/test/test_ensemble_builder/test_ensemble_builder.py b/test/test_ensemble_builder/test_ensemble_builder.py index 9aa9c38899..7c5e593864 100644 --- a/test/test_ensemble_builder/test_ensemble_builder.py +++ b/test/test_ensemble_builder/test_ensemble_builder.py @@ -321,6 +321,8 @@ def 
test_candidates_performance_range_threshold( performance_range_threshold=threshold, ) + assert len(candidates) > 0 + # When no run is better than threshold, we just get 1 candidate, # Make sure it's the best if len(candidates) == 1: From b326cc98f3d49f30bf8ae2910fffbc66442d4dcd Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 13 May 2022 16:04:00 +0200 Subject: [PATCH 115/117] Fix `_metric` to `_metrics` --- test/fixtures/ensemble_building.py | 2 +- test/test_ensemble_builder/test_manager.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/fixtures/ensemble_building.py b/test/fixtures/ensemble_building.py index 43924c5465..d441e41959 100644 --- a/test/fixtures/ensemble_building.py +++ b/test/fixtures/ensemble_building.py @@ -117,7 +117,7 @@ def _make( backend = automl._backend dataset_name = automl._dataset_name task_type = automl._task - metric = automl._metric + metric = automl._metrics kwargs = { "ensemble_size": automl._ensemble_size, "ensemble_nbest": automl._ensemble_nbest, diff --git a/test/test_ensemble_builder/test_manager.py b/test/test_ensemble_builder/test_manager.py index 077a2ae71c..558cea8ac6 100644 --- a/test/test_ensemble_builder/test_manager.py +++ b/test/test_ensemble_builder/test_manager.py @@ -19,7 +19,7 @@ def case_real_runs( """Uses real runs from a fitted automl instance""" manager = make_ensemble_builder_manager( backend=automl._backend, - metric=automl._metric, + metric=automl._metrics, task=automl._task, dataset_name=automl._dataset_name, seed=automl._seed, From 4e4ea6454c93fb6bc7c2a17635e4dbea9588b530 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 13 May 2022 16:58:36 +0200 Subject: [PATCH 116/117] Fix `make_ensemble_builder` --- test/fixtures/ensemble_building.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/fixtures/ensemble_building.py b/test/fixtures/ensemble_building.py index d441e41959..42dd7fbb9a 100644 --- a/test/fixtures/ensemble_building.py +++ b/test/fixtures/ensemble_building.py @@ -117,7 +117,7 @@ def _make( backend = automl._backend dataset_name = automl._dataset_name task_type = automl._task - metric = automl._metrics + metric = automl._metrics[0] kwargs = { "ensemble_size": automl._ensemble_size, "ensemble_nbest": automl._ensemble_nbest, From 92f59c28884bb52664c5425040e6922d9dfab3d3 Mon Sep 17 00:00:00 2001 From: eddiebergman Date: Fri, 13 May 2022 17:01:41 +0200 Subject: [PATCH 117/117] One more fix for multiple metrics --- test/test_ensemble_builder/test_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_ensemble_builder/test_manager.py b/test/test_ensemble_builder/test_manager.py index 558cea8ac6..6e4039ca86 100644 --- a/test/test_ensemble_builder/test_manager.py +++ b/test/test_ensemble_builder/test_manager.py @@ -19,7 +19,7 @@ def case_real_runs( """Uses real runs from a fitted automl instance""" manager = make_ensemble_builder_manager( backend=automl._backend, - metric=automl._metrics, + metric=automl._metrics[0], task=automl._task, dataset_name=automl._dataset_name, seed=automl._seed,
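
As a closing illustration of the caching change from PATCH 101 and the test rewritten in PATCH 102: the prediction cache is stripped in `__getstate__` and recreated empty in `__setstate__`, so pickled candidates stay small and always reread predictions after a round-trip. The toy below is a self-contained sketch of that idiom, assuming a hypothetical `CachedRun` class; it is not the real `autosklearn.ensemble_building.run.Run`.

import pickle

import numpy as np


class CachedRun:
    """Toy model of a run whose in-memory cache never survives pickling."""

    def __init__(self, path: str) -> None:
        self.path = path
        self._cache: dict = {}

    def predictions(self) -> np.ndarray:
        if "ensemble" not in self._cache:
            # Stand-in for np.load(self.path); real runs read .npy prediction files
            self._cache["ensemble"] = np.zeros(3)
        return self._cache["ensemble"]

    def __getstate__(self) -> dict:
        state = self.__dict__.copy()
        del state["_cache"]  # never persist cached predictions
        return state

    def __setstate__(self, state: dict) -> None:
        self.__dict__.update(state)
        self._cache = {}  # start with a fresh, empty cache after unpickling


if __name__ == "__main__":
    run = CachedRun("predictions_ensemble_0_1_0.0.npy")
    first = run.predictions()
    assert first is run.predictions()          # second call hits the cache

    clone = pickle.loads(pickle.dumps(run))
    assert len(clone._cache) == 0              # the cache did not survive pickling
    assert clone.predictions() is not first    # predictions are reloaded fresh

This mirrors what the rewritten `test_caching` checks: object identity for the cached load, and an empty cache plus a fresh load after a pickle round-trip, instead of relying on file access times from `stat`.
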