Provided docstring for hyperparameter_optimization.py + Learn2_new.py… small comments
georgemilosh committed Jan 31, 2023
1 parent a19afec commit d1cd11e
Showing 2 changed files with 44 additions and 6 deletions.
7 changes: 4 additions & 3 deletions PLASIM/Learn2_new.py
@@ -1378,13 +1378,14 @@ def create_model(input_shape, conv_channels=[32,64,64], kernel_sizes=3, strides=
dropout to be applied after the BatchNormalization layer. If 0 no dropout is applied
max_pool_sizes : int or list of int, optional
size of max pooling layer to be applied after dropout. If 0 no max pool is applied
conv_l2coef : list of floats which encodes the values of L2 regularizers in convolutional layers, optional, defaults to None
dense_units : list of int, optional
number of neurons for each fully connected layer
dense_activations : str or list of str, optional
activation functions after each fully connected layer
dense_dropouts : float in [0,1] or list of floats in [0,1], optional
l2coef : list of floats which encodes the values of L2 regularizers in dense layers, optional
dense_l2coef : list of floats which encodes the values of L2 regularizers in dense layers, optional, defaults to None
Returns
-------
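For orientation, a minimal sketch of how the two new regularization keyword arguments might be passed to create_model; only the keyword names come from the docstring above, while the input shape and coefficient values are illustrative assumptions, not taken from the commit:

    import Learn2_new as ln

    # hypothetical call: shapes and values are placeholders
    model = ln.create_model(
        input_shape=(22, 128, 3),          # assumed (lat, lon, fields) input
        conv_channels=[32, 64, 64],
        kernel_sizes=3,
        conv_l2coef=[1e-4, 1e-4, 1e-4],    # one L2 coefficient per convolutional layer
        dense_units=[64, 2],
        dense_l2coef=[1e-4, 1e-4],         # one L2 coefficient per dense layer
    )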
@@ -3026,12 +3027,12 @@ def main():

trainer_kwargs = get_default_params(Trainer)
trainer_kwargs.pop('config')
trainer_kwargs.pop('root_folder') # this two parameters cannot be changed
trainer_kwargs.pop('root_folder') # these two parameters cannot be changed
for k in trainer_kwargs:
if k in arg_dict:
trainer_kwargs[k] = arg_dict.pop(k)
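To make the intent of the loop above concrete, a toy illustration with made-up keys (none of these keys come from the repository):

    trainer_kwargs = {'upon_failed_run': 'raise', 'skip_existing_run': True}  # placeholder defaults
    arg_dict = {'skip_existing_run': False, 'lr': 1e-4}                       # command-line overrides

    for k in trainer_kwargs:
        if k in arg_dict:
            trainer_kwargs[k] = arg_dict.pop(k)

    # trainer_kwargs -> {'upon_failed_run': 'raise', 'skip_existing_run': False}
    # arg_dict       -> {'lr': 1e-4}  (keys not consumed by the Trainer remain here)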

# check if we want to import the parameters from another run
# check if we want to import the parameters from another run (see usage description in the beginning of this file)
import_params_from = arg_dict.pop('import_params_from', None)
if import_params_from is not None:
runs = ut.json2dict('./runs.json')
43 changes: 40 additions & 3 deletions PLASIM/hyperparameter_optimization.py
@@ -2,7 +2,38 @@
# Created on 12 May 2022

# @author: Alessandro Lovo
# '''
'''
usage
-----
First you need to move the code to a desired folder by running
python hyperparameter_optimization.py <folder>
This will copy this code and its dependencies to your desired location and will create a config file from the default values of the functions specified in this module (just like for Learn2_new.py).
`cd` into your folder and have a look at the config file: modify all the parameters you want BEFORE the first run, because AFTER the first successful run the config file becomes read-only. There is a reason for this, so don't try to modify it anyway!
When running the code you can specify some parameters that should deviate from their default values, for example running (inside your folder)
python hyperparameter_optimization.py n_trials=10
will run the code with all parameters at their default values except `n_trials`, which gives optuna 10 trials to optimize the hyperparameters with (i.e. optuna will only be given 10 runs in this case)
Other parameters include:
study_name: (string)
The name of the study, which tells optuna how to name the file storing the trials
count_pruned: (bool)
Whether optuna counts the runs that were pruned, i.e. the runs that were stopped early because they did not look promising
config.json recommendations which overwrite Learn2_new.py defaults:
---------------------------
config["run_kwargs"]["k_fold_cross_val_kwargs"]["load_from"] = False,
config["run_kwargs"]["k_fold_cross_val_kwargs"]["prune_threshold"] = 0.25,
config["run_kwargs"]["k_fold_cross_val_kwargs"]["min_folds_before_pruning"] = 2,
config["run_kwargs"]["k_fold_cross_val_kwargs"]["train_model_kwargs"]["enable_early_stopping"] = True,
config["run_kwargs"]["k_fold_cross_val_kwargs"]["train_model_kwargs"]["early_stopping_kwargs"]["patience"] = 5,
config["run_kwargs"]["k_fold_cross_val_kwargs"]["optimal_checkpoint_kwargs"]["collective"] = False,
'''
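A sketch of how the recommendations above could be applied programmatically before the first run; it uses only the standard json module and assumes config.json sits in the current folder (this block is not part of the committed file):

    import json

    with open('config.json', 'r') as f:
        config = json.load(f)

    # the nested keys follow the recommendation list above
    kfcv = config['run_kwargs']['k_fold_cross_val_kwargs']
    kfcv['load_from'] = False
    kfcv['prune_threshold'] = 0.25
    kfcv['min_folds_before_pruning'] = 2
    kfcv['train_model_kwargs']['enable_early_stopping'] = True
    kfcv['train_model_kwargs']['early_stopping_kwargs']['patience'] = 5
    kfcv['optimal_checkpoint_kwargs']['collective'] = False

    with open('config.json', 'w') as f:
        json.dump(config, f, indent=4)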
from ast import arg, literal_eval
import Learn2_new as ln
logger = ln.logger
@@ -41,7 +72,7 @@ def objective(self, trial):
lr = literal_eval(f'{lr:.7f}') # limit the resolution of the learning rate
hyp['lr'] = lr
hyp['batch_size'] = trial.suggest_int('batch_size', 128, 2048, log=True)

"""
# convolutional layers
n_conv_layers = trial.suggest_int('n_conv_layers', 1, 4)
hyp['conv_channels'] = []
@@ -50,24 +81,30 @@
hyp['batch_normalizations'] = []
hyp['conv_dropouts'] = []
hyp['max_pool_sizes'] = []
hyp['conv_l2coef'] = []
for i in range(n_conv_layers):
hyp['conv_channels'].append(trial.suggest_int(f'conv_channels_{i+1}', 8, 128))
hyp['kernel_sizes'].append(trial.suggest_int(f'kernel_sizes_{i+1}', 2, 10))
hyp['strides'].append(trial.suggest_int(f'strides_{i+1}', 1, hyp['kernel_sizes'][-1]))
hyp['batch_normalizations'].append(trial.suggest_categorical(f'batch_normalizations_{i+1}', [True, False]))
hyp['conv_dropouts'].append(literal_eval(f"{trial.suggest_float(f'conv_dropouts_{i+1}', 0, 0.8, step=0.01):.2f}"))
hyp['max_pool_sizes'].append(trial.suggest_int(f'max_pool_sizes_{i+1}', 1, 4))
hyp['conv_l2coef'].append(literal_eval(f"{trial.suggest_float(f'conv_l2coef_{i+1}', 1e-6, 1e6, log=True):.7f}"))
# fully connected layers
n_dense_layers = trial.suggest_int('n_dense_layers', 1, 4)
hyp['dense_units'] = []
hyp['dense_activations'] = ['relu']*(n_dense_layers - 1) + [None]
hyp['dense_dropouts'] = []
hyp['dense_l2coef'] = []
for i in range(n_dense_layers - 1):
hyp['dense_units'].append(trial.suggest_int(f'dense_units_{i+1}', 8, 128))
hyp['dense_dropouts'].append(literal_eval(f"{trial.suggest_float(f'dense_dropouts_{i+1}', 0, 0.8, step=0.01):.2f}"))
hyp['dense_l2coef'].append(literal_eval(f"{trial.suggest_float(f'dense_l2coef_{i+1}', 1e-6, 1e6, log=True):.7f}"))
hyp['dense_units'].append(2)
hyp['dense_dropouts'].append(False)
"""

# remove arguments that remained empty lists (this facilitates commenting lines to remove kwargs to optimize)
kw_to_remove = []
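The rest of this clean-up is cut off by the diff; a plausible completion (a sketch of the idea, not the committed code) would be:

    # collect hyperparameters whose list stayed empty because the corresponding
    # suggestion lines above were commented out, then drop them
    for k, v in hyp.items():
        if isinstance(v, list) and len(v) == 0:
            kw_to_remove.append(k)
    for k in kw_to_remove:
        hyp.pop(k)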
@@ -156,7 +193,7 @@ def main():
if trainer.config_dict_flat['load_from'] is not None:
raise ValueError('load_from is not None!')

study_name = arg_dict.pop('study_name', 'study')
study_name = arg_dict.pop('study_name', 'study') # optuna stores its experiments in the file `{name}.db`
n_trials = arg_dict.pop('n_trials', None)
count_pruned = arg_dict.pop('count_pruned', True)
if not n_trials:
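For context on the `{name}.db` remark above, a hedged sketch of how a study name typically maps to an SQLite storage file with the standard optuna API (the direction here is an assumption, not taken from this commit):

    import optuna

    study_name = 'study'
    study = optuna.create_study(
        study_name=study_name,
        storage=f'sqlite:///{study_name}.db',  # this is the file referred to above
        load_if_exists=True,                   # resume the study if the file already exists
        direction='minimize',                  # assumed: the objective returns a loss-like score
    )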
