diff --git a/autosklearn/automl.py b/autosklearn/automl.py
index 520abf97c9..add43d66aa 100644
--- a/autosklearn/automl.py
+++ b/autosklearn/automl.py
@@ -43,7 +43,6 @@
 from autosklearn.util.stopwatch import StopWatch
 from autosklearn.util.logging_ import (
     get_logger,
-    is_port_in_use,
     LogRecordSocketReceiver,
     setup_logger,
 )
@@ -247,7 +246,11 @@ def _create_dask_client(self):
                 # file was deleted, so the client could not close
                 # the worker properly
                 local_directory=tempfile.gettempdir(),
-            )
+                # Memory is handled by the pynisher, not by the dask worker/nanny
+                memory_limit=0,
+            ),
+            # Heartbeat every 10s
+            heartbeat_interval=10000,
         )
 
     def _close_dask_client(self):
@@ -269,14 +272,13 @@ def _get_logger(self, name):
         # Setup the configuration for the logger
         # This is gonna be honored by the server
         # Which is created below
-        setup_logger(os.path.join(self._backend.temporary_directory,
-                                  '%s.log' % str(logger_name)),
-                     self.logging_config,
-                     )
-
-        # The desired port might be used, so check this
-        while is_port_in_use(self._logger_port):
-            self._logger_port += 1
+        setup_logger(
+            output_file=os.path.join(
+                self._backend.temporary_directory, '%s.log' % str(logger_name)
+            ),
+            logging_config=self.logging_config,
+            output_dir=self._backend.temporary_directory,
+        )
 
         # As Auto-sklearn works with distributed process,
         # we implement a logger server that can receive tcp
@@ -284,11 +286,21 @@ def _get_logger(self, name):
         # under the above logging configuration setting
         # We need to specify the logger_name so that received records
         # are treated under the logger_name ROOT logger setting
-        self.stop_logging_server = multiprocessing.Event()
-        self.logger_tcpserver = LogRecordSocketReceiver(logname=logger_name,
-                                                        port=self._logger_port,
-                                                        event=self.stop_logging_server)
-        self.logging_server = multiprocessing.Process(
+        context = multiprocessing.get_context('fork')
+        self.stop_logging_server = context.Event()
+
+        while True:
+            # Loop until we find a valid port
+            self._logger_port = np.random.randint(10000, 65535)
+            try:
+                self.logger_tcpserver = LogRecordSocketReceiver(logname=logger_name,
+                                                                port=self._logger_port,
+                                                                event=self.stop_logging_server)
+                break
+            except OSError:
+                continue
+
+        self.logging_server = context.Process(
             target=self.logger_tcpserver.serve_until_stopped)
         self.logging_server.daemon = False
         self.logging_server.start()
@@ -354,7 +366,6 @@ def _do_dummy_prediction(self, datamanager, num_run):
             autosklearn_seed=self._seed,
             resampling_strategy=self._resampling_strategy,
             initial_num_run=num_run,
-            logger=self._logger,
             stats=stats,
             metric=self._metric,
             memory_limit=memory_limit,
@@ -409,6 +420,9 @@ def fit(
         only_return_configuration_space: Optional[bool] = False,
         load_models: bool = True,
     ):
+        self._backend.save_start_time(self._seed)
+        self._stopwatch = StopWatch()
+
         # Make sure that input is valid
         # Performs Ordinal one hot encoding to the target
         # both for train and test data
@@ -434,6 +448,12 @@ def fit(
             raise ValueError('Metric must be instance of '
                              'autosklearn.metrics.Scorer.')
 
+        if dataset_name is None:
+            dataset_name = hash_array_or_matrix(X)
+        # By default try to use the TCP logging port or get a new port
+        self._logger_port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
+        self._logger = self._get_logger(dataset_name)
+
         # If no dask client was provided, we create one, so that we can
         # start a ensemble process in parallel to smbo optimize
         if (
@@ -444,18 +464,9 @@ def fit(
         else:
             self._is_dask_client_internally_created = False
 
-        if dataset_name is None:
-            dataset_name = hash_array_or_matrix(X)
-
-        self._backend.save_start_time(self._seed)
-        self._stopwatch = StopWatch()
         self._dataset_name = dataset_name
         self._stopwatch.start_task(self._dataset_name)
 
-        # By default try to use the TCP logging port or get a new port
-        self._logger_port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
-        self._logger = self._get_logger(dataset_name)
-
         if feat_type is not None and len(feat_type) != X.shape[1]:
             raise ValueError('Array feat_type does not have same number of '
                              'variables as X has features. %d vs %d.' %
diff --git a/autosklearn/ensemble_builder.py b/autosklearn/ensemble_builder.py
index 0a344e96e2..c69cb25b3b 100644
--- a/autosklearn/ensemble_builder.py
+++ b/autosklearn/ensemble_builder.py
@@ -4,10 +4,12 @@
 import math
 import numbers
 import logging.handlers
+import multiprocessing
 import os
 import pickle
 import re
 import shutil
+import sys
 import time
 import traceback
 from typing import List, Optional, Tuple, Union
@@ -28,7 +30,7 @@
 from autosklearn.metrics import calculate_score, Scorer
 from autosklearn.ensembles.ensemble_selection import EnsembleSelection
 from autosklearn.ensembles.abstract_ensemble import AbstractEnsemble
-from autosklearn.util.logging_ import get_named_client_logger
+from autosklearn.util.logging_ import get_named_client_logger, get_logger
 
 Y_ENSEMBLE = 0
 Y_VALID = 1
@@ -153,7 +155,7 @@ def build_ensemble(self, dask_client: dask.distributed.Client) -> None:
         # The second criteria is elapsed time
         elapsed_time = time.time() - self.start_time
 
-        logger = get_named_client_logger('EnsembleBuilder', port=self.logger_port)
+        logger = get_logger('EnsembleBuilder')
 
         # First test for termination conditions
         if self.time_left_for_ensembles < elapsed_time:
@@ -562,10 +564,17 @@ def run(
             if time_left - time_buffer < 1:
                 break
 
+            context = multiprocessing.get_context('forkserver')
+            # Try to copy as many modules into the new context to reduce startup time
+            # http://www.bnikolic.co.uk/blog/python/parallelism/2019/11/13/python-forkserver-preload.html
+            # do not copy the logging module as it causes deadlocks!
+            preload_modules = list(filter(lambda key: 'logging' not in key, sys.modules.keys()))
+            context.set_forkserver_preload(preload_modules)
             safe_ensemble_script = pynisher.enforce_limits(
                 wall_time_in_s=int(time_left - time_buffer),
                 mem_in_mb=self.memory_limit,
-                logger=self.logger
+                logger=self.logger,
+                context=context,
             )(self.main)
             safe_ensemble_script(time_left, iteration, return_predictions)
             if safe_ensemble_script.exit_status is pynisher.MemorylimitException:
@@ -1385,24 +1394,11 @@ def _delete_excess_models(self, selected_keys: List[str]):
 
 
         """
-        # Obtain a list of sorted pred keys
-        sorted_keys = self._get_list_of_sorted_preds()
-        sorted_keys = list(map(lambda x: x[0], sorted_keys))
-
-        if len(sorted_keys) <= self.max_resident_models:
-            # Don't waste time if not enough models to delete
-            return
-
-        # The top self.max_resident_models models would be the candidates
-        # Any other low performance model will be deleted
-        # The list is in ascending order of score
-        candidates = sorted_keys[:self.max_resident_models]
-
         # Loop through the files currently in the directory
         for pred_path in self.y_ens_files:
 
             # Do not delete candidates
-            if pred_path in candidates:
+            if pred_path in selected_keys:
                 continue
 
             if pred_path in self._has_been_candidate:
diff --git a/autosklearn/estimators.py b/autosklearn/estimators.py
index 89e731a202..1119e1052a 100644
--- a/autosklearn/estimators.py
+++ b/autosklearn/estimators.py
@@ -177,6 +177,10 @@ def __init__(
     dask_client : dask.distributed.Client, optional
         User-created dask client, can be used to start a dask cluster and then
         attach auto-sklearn to it.
+
+        Auto-sklearn can run into a deadlock if the dask client uses threads for
+        parallelization; it is therefore highly recommended to use dask workers
+        that each run in a single process.
 
     disable_evaluator_output: bool or list, optional (False)
         If True, disable model and prediction output. Cannot be used
diff --git a/autosklearn/evaluation/__init__.py b/autosklearn/evaluation/__init__.py
index 541e782f29..3e389a4478 100644
--- a/autosklearn/evaluation/__init__.py
+++ b/autosklearn/evaluation/__init__.py
@@ -4,6 +4,7 @@
 import math
 import multiprocessing
 from queue import Empty
+import sys
 import time
 import traceback
 from typing import Dict, List, Optional, Tuple, Union
@@ -96,7 +97,7 @@ def _encode_exit_status(exit_status):
 class ExecuteTaFuncWithQueue(AbstractTAFunc):
 
     def __init__(self, backend, autosklearn_seed, resampling_strategy, metric,
-                 logger, cost_for_crash, abort_on_first_run_crash,
+                 cost_for_crash, abort_on_first_run_crash,
                  initial_num_run=1, stats=None, run_obj='quality',
                  par_factor=1, all_scoring_functions=False,
                  output_y_hat_optimization=True, include=None, exclude=None,
@@ -160,7 +161,6 @@ def __init__(self, backend, autosklearn_seed, resampling_strategy, metric,
         self.disable_file_output = disable_file_output
         self.init_params = init_params
         self.budget_type = budget_type
-        self.logger = logger
 
         if memory_limit is not None:
             memory_limit = int(math.ceil(memory_limit))
@@ -244,7 +244,13 @@ def run(
         instance_specific: Optional[str] = None,
     ) -> Tuple[StatusType, float, float, Dict[str, Union[int, float, str, Dict, List, Tuple]]]:
 
-        queue = multiprocessing.Queue()
+        context = multiprocessing.get_context('forkserver')
+        # Try to copy as many modules into the new context to reduce startup time
+        # http://www.bnikolic.co.uk/blog/python/parallelism/2019/11/13/python-forkserver-preload.html
+        # do not copy the logging module as it causes deadlocks!
+        preload_modules = list(filter(lambda key: 'logging' not in key, sys.modules.keys()))
+        context.set_forkserver_preload(preload_modules)
+        queue = context.Queue()
 
         if not (instance_specific is None or instance_specific == '0'):
             raise ValueError(instance_specific)
@@ -257,6 +263,7 @@ def run(
             wall_time_in_s=cutoff,
             mem_in_mb=self.memory_limit,
             capture_output=True,
+            context=context,
         )
 
         if isinstance(config, int):
@@ -436,8 +443,4 @@ def run(
             runtime = float(obj.wall_clock_time)
 
         autosklearn.evaluation.util.empty_queue(queue)
-        self.logger.debug(
-            'Finished function evaluation. Status: %s, Cost: %f, Runtime: %f, Additional %s',
-            status, cost, runtime, additional_run_info,
-        )
         return status, cost, runtime, additional_run_info
diff --git a/autosklearn/metalearning/input/aslib_simple.py b/autosklearn/metalearning/input/aslib_simple.py
index eb7b5e47b9..7bac637c50 100644
--- a/autosklearn/metalearning/input/aslib_simple.py
+++ b/autosklearn/metalearning/input/aslib_simple.py
@@ -57,8 +57,9 @@ def _find_files(self):
         for expected_file in optional:
             full_path = os.path.join(self.dir_, expected_file)
             if not os.path.isfile(full_path):
-                self.logger.warning(
-                    "Not found: %s (maybe you want to add it)" % (full_path))
+                # self.logger.warning(
+                #     "Not found: %s (maybe you want to add it)" % (full_path))
+                pass
             else:
                 self.found_files.append(full_path)
 
diff --git a/autosklearn/metalearning/mismbo.py b/autosklearn/metalearning/mismbo.py
index 8dea9ba3c1..0a9e6d3c4d 100644
--- a/autosklearn/metalearning/mismbo.py
+++ b/autosklearn/metalearning/mismbo.py
@@ -19,7 +19,7 @@ def suggest_via_metalearning(
 
     task = TASK_TYPES_TO_STRING[task]
 
-    logger.warning(task)
+    logger.info(task)
 
     start = time.time()
     ml = MetaLearningOptimizer(
diff --git a/autosklearn/smbo.py b/autosklearn/smbo.py
index 316c2fa880..cbb353177c 100644
--- a/autosklearn/smbo.py
+++ b/autosklearn/smbo.py
@@ -430,7 +430,6 @@ def run_smbo(self):
             autosklearn_seed=seed,
             resampling_strategy=self.resampling_strategy,
             initial_num_run=num_run,
-            logger=self.logger,
             include=include,
             exclude=exclude,
             metric=self.metric,
diff --git a/autosklearn/util/logging.yaml b/autosklearn/util/logging.yaml
index 613154d228..62db539ae2 100644
--- a/autosklearn/util/logging.yaml
+++ b/autosklearn/util/logging.yaml
@@ -18,6 +18,12 @@ handlers:
         formatter: simple
         filename: autosklearn.log
 
+    distributed_logfile:
+        class: logging.FileHandler
+        level: DEBUG
+        formatter: simple
+        filename: distributed.log
+
 root:
     level: DEBUG
     handlers: [console, file_handler]
@@ -26,7 +32,6 @@ loggers:
     autosklearn.metalearning:
         level: DEBUG
         handlers: [file_handler]
-        propagate: no
 
     autosklearn.util.backend:
         level: DEBUG
@@ -48,3 +53,7 @@ loggers:
     EnsembleBuilder:
         level: DEBUG
         propagate: no
+
+    distributed:
+        level: DEBUG
+        handlers: [distributed_logfile]
diff --git a/autosklearn/util/logging_.py b/autosklearn/util/logging_.py
index 1e91783a64..07d70b07a4 100644
--- a/autosklearn/util/logging_.py
+++ b/autosklearn/util/logging_.py
@@ -5,7 +5,6 @@
 import os
 import pickle
 import select
-import socket
 import socketserver
 import struct
 import threading
@@ -14,20 +13,30 @@
 import yaml
 
 
-def setup_logger(output_file: Optional[str] = None, logging_config: Optional[Dict] = None
-                 ) -> None:
+def setup_logger(
+    output_file: Optional[str] = None,
+    logging_config: Optional[Dict] = None,
+    output_dir: Optional[str] = None,
+) -> None:
     # logging_config must be a dictionary object specifying the configuration
     # for the loggers to be used in auto-sklearn.
     if logging_config is not None:
         if output_file is not None:
             logging_config['handlers']['file_handler']['filename'] = output_file
+        if output_dir is not None:
+            logging_config['handlers']['distributed_logfile']['filename'] = os.path.join(
+                output_dir, 'distributed.log'
+            )
         logging.config.dictConfig(logging_config)
     else:
-        with open(os.path.join(os.path.dirname(__file__), 'logging.yaml'),
-                  'r') as fh:
+        with open(os.path.join(os.path.dirname(__file__), 'logging.yaml'), 'r') as fh:
             logging_config = yaml.safe_load(fh)
         if output_file is not None:
             logging_config['handlers']['file_handler']['filename'] = output_file
+        if output_dir is not None:
+            logging_config['handlers']['distributed_logfile']['filename'] = os.path.join(
+                output_dir, 'distributed.log'
+            )
         logging.config.dictConfig(logging_config)
 
 
@@ -40,11 +49,6 @@ def get_logger(name: str) -> 'PickableLoggerAdapter':
     return logger
 
 
-def is_port_in_use(port: int) -> bool:
-    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-        return s.connect_ex(('localhost', port)) == 0
-
-
 def get_named_client_logger(name: str, host: str = 'localhost',
                             port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT
                             ) -> 'PickableLoggerAdapter':
@@ -157,13 +161,13 @@ def handle(self) -> None:
         according to whatever policy is configured locally.
         """
         while True:
-            chunk = self.connection.recv(4)
+            chunk = self.connection.recv(4)  # type: ignore[attr-defined]
            if len(chunk) < 4:
                 break
             slen = struct.unpack('>L', chunk)[0]
-            chunk = self.connection.recv(slen)
+            chunk = self.connection.recv(slen)  # type: ignore[attr-defined]
             while len(chunk) < slen:
-                chunk = chunk + self.connection.recv(slen - len(chunk))
+                chunk = chunk + self.connection.recv(slen - len(chunk))  # type: ignore[attr-defined] # noqa: E501
             obj = self.unPickle(chunk)
             record = logging.makeLogRecord(obj)
             self.handleLogRecord(record)
@@ -216,6 +220,5 @@ def serve_until_stopped(self) -> None:
                                        self.timeout)
             if rd:
                 self.handle_request()
-
             if self.event is not None and self.event.is_set():
                 break
diff --git a/requirements.txt b/requirements.txt
index 45dee41ed8..bf8fcafa0b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,6 +14,6 @@
 pandas>=1.0
 liac-arff
 ConfigSpace>=0.4.14,<0.5
-pynisher>=0.6.1
+pynisher>=0.6.2
 pyrfr>=0.7,<0.9
 smac>=0.13.1,<0.14
diff --git a/scripts/02_retrieve_metadata.py b/scripts/02_retrieve_metadata.py
index 6e05f5ec0a..611b190dfa 100644
--- a/scripts/02_retrieve_metadata.py
+++ b/scripts/02_retrieve_metadata.py
@@ -68,7 +68,7 @@ def retrieve_matadata(validation_directory, metric, configuration_space,
                 best_configuration = Configuration(
                     configuration_space=configuration_space, values=config)
                 best_value = score
-                best_configuration_dir = ped
+                best_configuration_dir = validation_trajectory_file
             except Exception as e:
                 print(e)
                 n_broken += 1
diff --git a/test/conftest.py b/test/conftest.py
index 992d8ce372..e9a42efd3f 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -120,30 +120,13 @@ def session_run_at_end():
 @pytest.fixture(scope="function")
 def dask_client(request):
     """
-    This fixture is meant to be called one per pytest session.
-
-    The goal of this function is to create a global client at the start
-    of the testing phase. We can create clients at the start of the
-    session (this case, as above scope is session), module, class or function
-    level.
-
-    The overhead of creating a dask client per class/module/session is something
-    that travis cannot handle, so we rely on the following execution flow:
-
-    1- At the start of the pytest session, session_run_at_beginning fixture is called
-    to create a global client on port 4567.
-    2- Any test that needs a client, would query the global scheduler that allows
-    communication through port 4567.
-    3- At the end of the test, we shutdown any remaining work being done by any worker
-    in the client. This has a maximum 10 seconds timeout. The client object will afterwards
-    be empty and when pytest closes, it can safely delete the object without hanging.
-
-    More info on this file can be found on:
-    https://docs.pytest.org/en/stable/writing_plugins.html#conftest-py-plugins
+    Create a dask client with two workers.
+
+    Workers run in subprocesses to avoid deadlocks with the pynisher and the logging module.
     """
 
     dask.config.set({'distributed.worker.daemon': False})
-    client = Client(n_workers=2, threads_per_worker=1, processes=False)
+    client = Client(n_workers=2, threads_per_worker=1, processes=True)
     print("Started Dask client={}\n".format(client))
 
     def get_finalizer(address):
@@ -163,6 +146,9 @@ def session_run_at_end():
 def dask_client_single_worker(request):
     """
     Same as above, but only with a single worker.
+
+    Using this fixture might cause deadlocks with the pynisher and the logging module,
+    so it is used as rarely as possible.
     """
     dask.config.set({'distributed.worker.daemon': False})
 
diff --git a/test/test_ensemble_builder/ensemble_utils.py b/test/test_ensemble_builder/ensemble_utils.py
index 653c5455ca..f0f68044e2 100644
--- a/test/test_ensemble_builder/ensemble_utils.py
+++ b/test/test_ensemble_builder/ensemble_utils.py
@@ -1,6 +1,7 @@
 import os
 import shutil
 import unittest
+import unittest.mock
 
 import numpy as np
 
diff --git a/test/test_ensemble_builder/test_ensemble.py b/test/test_ensemble_builder/test_ensemble.py
index 31aa1f4166..be51adc45e 100644
--- a/test/test_ensemble_builder/test_ensemble.py
+++ b/test/test_ensemble_builder/test_ensemble.py
@@ -732,7 +732,7 @@ def test_get_identifiers_from_run_history(exists, metric, ensemble_run_history,
     assert budget == 3.0
 
 
-def test_ensemble_builder_process_realrun(dask_client, ensemble_backend):
+def test_ensemble_builder_process_realrun(dask_client_single_worker, ensemble_backend):
     manager = EnsembleBuilderManager(
         start_time=time.time(),
         time_left_for_ensembles=1000,
@@ -750,7 +750,7 @@ def test_ensemble_builder_process_realrun(dask_client, ensemble_backend):
         ensemble_memory_limit=None,
         random_state=0,
     )
-    manager.build_ensemble(dask_client)
+    manager.build_ensemble(dask_client_single_worker)
     future = manager.futures.pop()
     dask.distributed.wait([future])  # wait for the ensemble process to finish
     result = future.result()
@@ -765,7 +765,11 @@ def test_ensemble_builder_process_realrun(dask_client, ensemble_backend):
 
 
 @unittest.mock.patch('autosklearn.ensemble_builder.EnsembleBuilder.fit_ensemble')
-def test_ensemble_builder_nbest_remembered(fit_ensemble, ensemble_backend, dask_client):
+def test_ensemble_builder_nbest_remembered(
+    fit_ensemble,
+    ensemble_backend,
+    dask_client_single_worker,
+):
     """
     Makes sure ensemble builder returns the size of the ensemble that pynisher allowed
     This way, we can remember it and not waste more time trying big ensemble sizes
@@ -791,14 +795,14 @@ def test_ensemble_builder_nbest_remembered(fit_ensemble, ensemble_backend, dask_
         max_iterations=None,
     )
 
-    manager.build_ensemble(dask_client)
+    manager.build_ensemble(dask_client_single_worker)
     future = manager.futures[0]
     dask.distributed.wait([future])  # wait for the ensemble process to finish
     assert future.result() == ([], 5, None, None, None)
     file_path = os.path.join(ensemble_backend.internals_directory, 'ensemble_read_preds.pkl')
     assert not os.path.exists(file_path)
 
-    manager.build_ensemble(dask_client)
+    manager.build_ensemble(dask_client_single_worker)
     future = manager.futures[0]
     dask.distributed.wait([future])  # wait for the ensemble process to finish
 
diff --git a/test/test_evaluation/test_evaluation.py b/test/test_evaluation/test_evaluation.py
index a7197b6ec6..1cc098ea0b 100644
--- a/test/test_evaluation/test_evaluation.py
+++ b/test/test_evaluation/test_evaluation.py
@@ -87,7 +87,6 @@ def test_eval_with_limits_holdout(self, pynisher_mock):
         config.config_id = 198
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     memory_limit=3072,
                                     metric=accuracy,
@@ -107,7 +106,6 @@ def test_zero_or_negative_cutoff(self, pynisher_mock):
         config.config_id = 198
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
@@ -125,7 +123,6 @@ def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
         config.config_id = 198
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
@@ -145,7 +142,6 @@ def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
         config.config_id = 198
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     memory_limit=3072,
                                     metric=accuracy,
@@ -186,7 +182,6 @@ def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
         config.config_id = 198
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     memory_limit=3072,
                                     metric=log_loss,
@@ -216,7 +211,6 @@ def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
         m2.wall_clock_time = 30
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     memory_limit=3072,
                                     metric=accuracy,
@@ -251,7 +245,6 @@ def side_effect(**kwargs):
         # Test for a succesful run
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     memory_limit=3072,
                                     metric=accuracy,
@@ -274,7 +267,6 @@ def side_effect(**kwargs):
         m2.side_effect = side_effect
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     memory_limit=3072,
                                     metric=accuracy,
@@ -301,7 +293,6 @@ def side_effect(*args, **kwargs):
         eval_houldout_mock.side_effect = side_effect
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     memory_limit=3072,
                                     metric=accuracy,
@@ -325,7 +316,6 @@ def test_exception_in_target_function(self, eval_holdout_mock):
         eval_holdout_mock.side_effect = ValueError
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     memory_limit=3072,
                                     metric=accuracy,
@@ -350,7 +340,6 @@ def test_silent_exception_in_target_function(self):
 
         ta = ExecuteTaFuncWithQueue(backend=backend_mock, autosklearn_seed=1,
                                     resampling_strategy='holdout',
-                                    logger=self.logger,
                                     stats=self.stats,
                                     memory_limit=3072,
                                     metric=accuracy,
diff --git a/test/test_scripts/test_metadata_generation.py b/test/test_scripts/test_metadata_generation.py
index 129d1f1513..30a8bfb8af 100644
--- a/test/test_scripts/test_metadata_generation.py
+++ b/test/test_scripts/test_metadata_generation.py
@@ -54,7 +54,7 @@ def test_metadata_generation(self):
         with open(commands_output_file) as fh:
             cmds = fh.read().split('\n')
             # 6 regression, 11 classification (roc_auc + task 258 is illegal), 1 empty line
-            self.assertEqual(len(cmds), 18)
+            self.assertEqual(len(cmds), 18, msg='\n'.join(cmds))
 
         for task_id, task_type, metric in (
             (classification_task_id, 'classification', 'accuracy'),
diff --git a/testcommand.sh b/testcommand.sh
index f74bc42d86..ffa49f76db 100644
--- a/testcommand.sh
+++ b/testcommand.sh
@@ -1,2 +1,2 @@
 #!/usr/bin/env bash
-pytest -n 8 --durations=300 --timeout=300 --dist load --timeout-method=thread -v $1
+pytest -n 3 --durations=20 --timeout=300 --dist load --timeout-method=thread -v $1