🐞 Fix HPO #462

Merged 2 commits on Aug 1, 2022
Changes from all commits

3 changes: 3 additions & 0 deletions .pre-commit-config.yaml
@@ -1,3 +1,6 @@
+default_language_version:
+  node: system
+
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v4.3.0
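Context for this hunk: pre-commit's `default_language_version` key pins the runtime used to build hook environments, and `node: system` tells it to run Node-based hooks with the Node.js already installed on the machine instead of provisioning its own, a common workaround when pre-commit's managed Node download fails in restricted CI environments.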

4 changes: 4 additions & 0 deletions tools/benchmarking/__init__.py
@@ -0,0 +1,4 @@
+"""Benchmarking Tools."""
+
+# Copyright (C) 2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
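Worth noting: this new `__init__.py` is not just licensing boilerplate. It marks `tools/benchmarking` as a regular Python package, so its modules can be imported with package-qualified and relative imports rather than depending on the current working directory, the same class of import problem the `tools/hpo/sweep.py` change below addresses.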

12 changes: 8 additions & 4 deletions tools/hpo/sweep.py
@@ -22,14 +22,17 @@
 from omegaconf import DictConfig, ListConfig, OmegaConf
 from pytorch_lightning import seed_everything
 from pytorch_lightning.loggers import WandbLogger
-from utils import flatten_hpo_params
 
 import wandb
 from anomalib.config import get_configurable_parameters, update_input_size_config
 from anomalib.data import get_datamodule
 from anomalib.models import get_model
-from anomalib.utils.sweep import flatten_sweep_params, set_in_nested_config
-
+from .utils import flatten_hpo_params
+from anomalib.utils.sweep import (
+    flatten_sweep_params,
+    get_sweep_callbacks,
+    set_in_nested_config,
+)
 
 
 class WandbSweep:
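The key fix in this hunk is the `flatten_hpo_params` import: the absolute `from utils import ...` only resolves if `tools/hpo/` itself happens to be on `sys.path`, whereas the relative form binds the lookup to the enclosing package. A minimal sketch of the difference, using a hypothetical `pkg/` layout rather than the anomalib tree:

    # Layout: pkg/__init__.py (empty marker), pkg/utils.py, pkg/main.py

    # pkg/utils.py
    def helper() -> str:
        return "resolved"

    # pkg/main.py
    from .utils import helper    # relative: resolved inside pkg, works with
                                 # `python -m pkg.main` from the parent directory
    # from utils import helper   # absolute: ModuleNotFoundError unless pkg/
    #                            # itself is on sys.path

    print(helper())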
@@ -70,11 +73,12 @@ def sweep(self):
 
         model = get_model(config)
         datamodule = get_datamodule(config)
+        callbacks = get_sweep_callbacks(config)
 
         # Disable saving checkpoints as all checkpoints from the sweep will get uploaded
         config.trainer.checkpoint_callback = False
 
-        trainer = pl.Trainer(**config.trainer, logger=wandb_logger)
+        trainer = pl.Trainer(**config.trainer, logger=wandb_logger, callbacks=callbacks)
         trainer.fit(model, datamodule=datamodule)
 
 
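Two hedged notes on this hunk, based on PyTorch Lightning's 1.x API rather than anything stated in the diff: `checkpoint_callback=False` is the pre-1.7 Trainer flag that disables the default `ModelCheckpoint` (later renamed `enable_checkpointing`), and the new `callbacks=` argument hands the objects returned by `get_sweep_callbacks` to the Trainer, which previously never received them, so they never ran. The wiring pattern, as a standalone sketch with a hypothetical callback:

    import pytorch_lightning as pl

    class SweepTimer(pl.Callback):
        """Hypothetical stand-in for the callbacks returned by get_sweep_callbacks."""

        def on_fit_end(self, trainer, pl_module):
            print("sweep trial finished")

    # Callbacks only run if they are actually passed to the Trainer.
    trainer = pl.Trainer(max_epochs=1, callbacks=[SweepTimer()])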

