From 482db822727049d49eca8da21ca7049c5b6c0a34 Mon Sep 17 00:00:00 2001 From: Olivier Date: Tue, 22 Oct 2024 17:49:32 +0200 Subject: [PATCH 01/30] :shirt: Fix title in corruption tutorial --- auto_tutorials_source/tutorial_corruption.py | 1 + 1 file changed, 1 insertion(+) diff --git a/auto_tutorials_source/tutorial_corruption.py b/auto_tutorials_source/tutorial_corruption.py index 734e957c..287f048b 100644 --- a/auto_tutorials_source/tutorial_corruption.py +++ b/auto_tutorials_source/tutorial_corruption.py @@ -125,6 +125,7 @@ def show_images(transforms): # %% # 4. Other Corruptions +# ~~~~~~~~~~~~~~~~~~~~ from torch_uncertainty.transforms.corruption import ( Brightness, Contrast, Elastic, JPEGCompression, Pixelate) From 760dd23ebdb46593edfd7bf8afc22e7ecbfdd086 Mon Sep 17 00:00:00 2001 From: Olivier Date: Fri, 1 Nov 2024 17:12:33 +0100 Subject: [PATCH 02/30] :bug: Code now runs without glest --- pyproject.toml | 3 +-- torch_uncertainty/metrics/classification/grouping_loss.py | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index eddd4df8..0cb28608 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,6 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3 :: Only", ] dependencies = [ @@ -89,7 +88,7 @@ line-length = 80 target-version = "py310" lint.extend-select = [ "A", - "ARG", + "ARG", "B", "C4", "D", diff --git a/torch_uncertainty/metrics/classification/grouping_loss.py b/torch_uncertainty/metrics/classification/grouping_loss.py index bed2bd24..0a53ac0d 100644 --- a/torch_uncertainty/metrics/classification/grouping_loss.py +++ b/torch_uncertainty/metrics/classification/grouping_loss.py @@ -8,6 +8,7 @@ glest_installed = True else: # coverage: ignore glest_installed = False + GLEstimatorBase = object from torch import Tensor from 
torchmetrics import Metric From 86adf87d785ad5c30501136fd069eae334ae430d Mon Sep 17 00:00:00 2001 From: Olivier Date: Sat, 9 Nov 2024 12:14:22 +0100 Subject: [PATCH 03/30] :shirt: Improve the look of ECE's diagrams --- .../classification/calibration_error.py | 53 ++++++++++--------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/torch_uncertainty/metrics/classification/calibration_error.py b/torch_uncertainty/metrics/classification/calibration_error.py index e361d499..13fd9448 100644 --- a/torch_uncertainty/metrics/classification/calibration_error.py +++ b/torch_uncertainty/metrics/classification/calibration_error.py @@ -41,10 +41,10 @@ def _reliability_diagram_subplot( colors[:, 3] = alphas gap_plt = ax.bar( - positions, - np.abs(accuracies - confidences), - bottom=np.minimum(accuracies, confidences), - width=widths, + positions * 100, + np.abs(accuracies - confidences) * 100, + bottom=np.minimum(accuracies, confidences) * 100, + width=widths * 100, edgecolor=colors, color=colors, linewidth=1, @@ -52,40 +52,41 @@ def _reliability_diagram_subplot( ) acc_plt = ax.bar( - positions, + positions * 100, 0, - bottom=accuracies, - width=widths, + bottom=accuracies * 100, + width=widths * 100, edgecolor="black", color="black", alpha=1.0, - linewidth=3, + linewidth=2, label="Accuracy", ) ax.set_aspect("equal") - ax.plot([0, 1], [0, 1], linestyle="--", color="gray") + ax.plot([0, 100], [0, 100], linestyle="--", color="gray") gaps = np.abs(accuracies - confidences) - ece = (np.sum(gaps * bin_sizes) / np.sum(bin_sizes)) * 100 + ece = np.sum(gaps * bin_sizes) / np.sum(bin_sizes) ax.text( 0.98, 0.02, - f"ECE={ece:.03}%", + f"ECE={ece:.02%}", color="black", ha="right", va="bottom", transform=ax.transAxes, ) - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) + ax.set_xlim(0, 100) + ax.set_ylim(0, 100) ax.set_title(title) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) + ax.grid(True, alpha=0.3, linestyle="--", zorder=0) ax.legend(handles=[gap_plt, acc_plt]) @@ -95,17 +96,18 @@ 
def _confidence_histogram_subplot( confidences: np.ndarray, title="Examples per bin", xlabel="Top-class Confidence (%)", - ylabel="Density", + ylabel="Density (%)", ) -> None: sns.kdeplot( - confidences, + confidences * 100, linewidth=2, ax=ax, fill=True, alpha=0.5, ) - ax.set_xlim(0, 1) + ax.set_xlim(0, 100) + ax.set_ylim(0, None) ax.set_title(title) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) @@ -114,19 +116,20 @@ def _confidence_histogram_subplot( avg_conf = np.mean(confidences) acc_plt = ax.axvline( - x=avg_acc, + x=avg_acc * 100, ls="solid", - lw=3, + lw=2, c="black", label="Accuracy", ) conf_plt = ax.axvline( - x=avg_conf, + x=avg_conf * 100, ls="dotted", - lw=3, + lw=2, c="#444", label="Avg. confidence", ) + ax.grid(True, alpha=0.3, linestyle="--", zorder=0) ax.legend(handles=[acc_plt, conf_plt], loc="upper left") @@ -139,7 +142,7 @@ def reliability_chart( bins: np.ndarray, title="Reliability Diagram", figsize=(6, 6), - dpi=72, + dpi=150, ) -> _PLOT_OUT_TYPE: """Builds Reliability Diagram `Source `_. 
@@ -170,11 +173,7 @@ def reliability_chart( # confidence histogram subplot _confidence_histogram_subplot(ax[1], accuracies, confidences, title="") - - new_ticks = np.abs(ax[1].get_yticks()).astype(np.int32) - ax[1].yaxis.set_major_locator(mticker.FixedLocator(new_ticks)) - ax[1].set_yticklabels(new_ticks) - + ax[1].yaxis.set_major_formatter(mticker.PercentFormatter(1.0)) return fig, ax @@ -240,6 +239,8 @@ def __new__( # type: ignore[misc] **kwargs: Any, ) -> Metric: """Initialize task metric.""" + if kwargs.get("n_bins") is not None: + raise ValueError("`n_bins` does not exist, use `num_bins`.") if adaptive: return AdaptiveCalibrationError( task=task, From f5ed45392ca591453b77ede01122d3a9894a8ade Mon Sep 17 00:00:00 2001 From: Olivier Date: Sat, 9 Nov 2024 12:56:32 +0100 Subject: [PATCH 04/30] :white_check_mark: Update tests --- tests/metrics/classification/test_calibration.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/metrics/classification/test_calibration.py b/tests/metrics/classification/test_calibration.py index ccc29ca2..0d6ad4d9 100644 --- a/tests/metrics/classification/test_calibration.py +++ b/tests/metrics/classification/test_calibration.py @@ -9,7 +9,7 @@ class TestCalibrationError: """Testing the CalibrationError metric class.""" def test_plot_binary(self) -> None: - metric = CalibrationError(task="binary", n_bins=2, norm="l1") + metric = CalibrationError(task="binary", num_bins=2, norm="l1") metric.update( torch.as_tensor([0.25, 0.25, 0.55, 0.75, 0.75]), torch.as_tensor([0, 0, 1, 1, 1]), @@ -19,7 +19,7 @@ def test_plot_binary(self) -> None: assert ax[0].get_xlabel() == "Top-class Confidence (%)" assert ax[0].get_ylabel() == "Success Rate (%)" assert ax[1].get_xlabel() == "Top-class Confidence (%)" - assert ax[1].get_ylabel() == "Density" + assert ax[1].get_ylabel() == "Density (%)" plt.close(fig) @@ -27,7 +27,7 @@ def test_plot_multiclass( self, ) -> None: metric = CalibrationError( - task="multiclass", 
n_bins=3, norm="l1", num_classes=3 + task="multiclass", num_bins=3, norm="l1", num_classes=3 ) metric.update( torch.as_tensor( @@ -45,12 +45,16 @@ def test_plot_multiclass( assert ax[0].get_xlabel() == "Top-class Confidence (%)" assert ax[0].get_ylabel() == "Success Rate (%)" assert ax[1].get_xlabel() == "Top-class Confidence (%)" - assert ax[1].get_ylabel() == "Density" + assert ax[1].get_ylabel() == "Density (%)" plt.close(fig) def test_errors(self) -> None: with pytest.raises(TypeError, match="is expected to be `int`"): CalibrationError(task="multiclass", num_classes=None) + with pytest.raises( + ValueError, match="`n_bins` does not exist, use `num_bins`." + ): + CalibrationError(task="multiclass", num_classes=2, n_bins=1) class TestAdaptiveCalibrationError: From fdfcaf2115a2deac476053e70345ff873f1a5de6 Mon Sep 17 00:00:00 2001 From: Olivier Date: Wed, 13 Nov 2024 11:53:41 +0100 Subject: [PATCH 05/30] :shirt: Reformat TUTrainer --- torch_uncertainty/utils/trainer.py | 95 +++++++++++++++++++++++++++++- 1 file changed, 92 insertions(+), 3 deletions(-) diff --git a/torch_uncertainty/utils/trainer.py b/torch_uncertainty/utils/trainer.py index 45ef8fc3..153f8ffa 100644 --- a/torch_uncertainty/utils/trainer.py +++ b/torch_uncertainty/utils/trainer.py @@ -1,16 +1,105 @@ +from collections.abc import Iterable +from pathlib import Path +from typing import Literal + from lightning.pytorch import Trainer +from lightning.pytorch.accelerators.accelerator import Accelerator +from lightning.pytorch.profilers import Profiler +from lightning.pytorch.strategies.strategy import Strategy from lightning.pytorch.trainer.states import ( RunningStage, TrainerFn, ) +from pytorch_lightning.callbacks import Callback +from pytorch_lightning.loggers import Logger from torch_uncertainty.utils.evaluation_loop import TUEvaluationLoop class TUTrainer(Trainer): - def __init__(self, inference_mode: bool = True, **kwargs) -> None: - super().__init__(inference_mode=inference_mode, **kwargs) - + def 
__init__( + self, + accelerator: str | Accelerator = "auto", + strategy: str | Strategy = "auto", + devices: list[int] | str | int = "auto", + num_nodes: int = 1, + precision: None | int | str = None, + logger: Logger | Iterable[Logger] | bool | None = None, + callbacks: list[Callback] | Callback | None = None, + fast_dev_run: int | bool = False, + max_epochs: int | None = None, + min_epochs: int | None = None, + max_steps: int = -1, + min_steps: int | None = None, + max_time: str | dict[str, int] | None = None, + limit_train_batches: float | None = None, + limit_val_batches: float | None = None, + limit_test_batches: float | None = None, + limit_predict_batches: float | None = None, + overfit_batches: float = 0, + val_check_interval: float | None = None, + check_val_every_n_epoch: int | None = 1, + num_sanity_val_steps: int | None = None, + log_every_n_steps: int | None = None, + enable_checkpointing: bool | None = None, + enable_progress_bar: bool | None = None, + enable_model_summary: bool | None = None, + accumulate_grad_batches: int = 1, + gradient_clip_val: float | None = None, + gradient_clip_algorithm: str | None = None, + deterministic: bool | None | Literal["warn"] = None, + benchmark: bool | None = None, + inference_mode: bool = True, + use_distributed_sampler: bool = True, + profiler: Profiler | str | None = None, + detect_anomaly: bool = False, + barebones: bool = False, + plugins=None, + sync_batchnorm: bool = False, + reload_dataloaders_every_n_epochs: int = 0, + default_root_dir: str | Path | None = None, + ) -> None: + super().__init__( + accelerator=accelerator, + strategy=strategy, + devices=devices, + num_nodes=num_nodes, + precision=precision, + logger=logger, + callbacks=callbacks, + fast_dev_run=fast_dev_run, + max_epochs=max_epochs, + min_epochs=min_epochs, + max_steps=max_steps, + min_steps=min_steps, + max_time=max_time, + limit_train_batches=limit_train_batches, + limit_val_batches=limit_val_batches, + 
limit_test_batches=limit_test_batches, + limit_predict_batches=limit_predict_batches, + overfit_batches=overfit_batches, + val_check_interval=val_check_interval, + check_val_every_n_epoch=check_val_every_n_epoch, + num_sanity_val_steps=num_sanity_val_steps, + log_every_n_steps=log_every_n_steps, + enable_checkpointing=enable_checkpointing, + enable_progress_bar=enable_progress_bar, + enable_model_summary=enable_model_summary, + accumulate_grad_batches=accumulate_grad_batches, + gradient_clip_val=gradient_clip_val, + gradient_clip_algorithm=gradient_clip_algorithm, + deterministic=deterministic, + benchmark=benchmark, + inference_mode=inference_mode, + use_distributed_sampler=use_distributed_sampler, + profiler=profiler, + detect_anomaly=detect_anomaly, + barebones=barebones, + plugins=plugins, + sync_batchnorm=sync_batchnorm, + reload_dataloaders_every_n_epochs=reload_dataloaders_every_n_epochs, + default_root_dir=default_root_dir, + ) self.test_loop = TUEvaluationLoop( self, TrainerFn.TESTING, From 1e23ebe3b30f145f94cdd6bfff840de50970423b Mon Sep 17 00:00:00 2001 From: Olivier Date: Wed, 13 Nov 2024 15:52:38 +0100 Subject: [PATCH 06/30] :bug: Use only lightning.pytorch --- torch_uncertainty/utils/trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torch_uncertainty/utils/trainer.py b/torch_uncertainty/utils/trainer.py index 153f8ffa..0fcdeec5 100644 --- a/torch_uncertainty/utils/trainer.py +++ b/torch_uncertainty/utils/trainer.py @@ -4,14 +4,14 @@ from lightning.pytorch import Trainer from lightning.pytorch.accelerators.accelerator import Accelerator +from lightning.pytorch.callbacks import Callback +from lightning.pytorch.loggers import Logger from lightning.pytorch.profilers import Profiler from lightning.pytorch.strategies.strategy import Strategy from lightning.pytorch.trainer.states import ( RunningStage, TrainerFn, ) -from pytorch_lightning.callbacks import Callback -from pytorch_lightning.loggers import Logger from 
torch_uncertainty.utils.evaluation_loop import TUEvaluationLoop From a1d1fcbe0c8021b9e1aeb3dbe4e8c320bf72f76d Mon Sep 17 00:00:00 2001 From: Olivier Date: Wed, 13 Nov 2024 16:00:29 +0100 Subject: [PATCH 07/30] :sparkles: Add binary classification metrics when needed --- torch_uncertainty/routines/classification.py | 67 +++++++++++--------- 1 file changed, 37 insertions(+), 30 deletions(-) diff --git a/torch_uncertainty/routines/classification.py b/torch_uncertainty/routines/classification.py index e76fc65a..b0c37385 100644 --- a/torch_uncertainty/routines/classification.py +++ b/torch_uncertainty/routines/classification.py @@ -182,36 +182,43 @@ def __init__( def _init_metrics(self) -> None: task = "binary" if self.binary_cls else "multiclass" - cls_metrics = MetricCollection( - { - "cls/Acc": Accuracy(task=task, num_classes=self.num_classes), - "cls/Brier": BrierScore(num_classes=self.num_classes), - "cls/NLL": CategoricalNLL(), - "cal/ECE": CalibrationError( - task=task, - num_bins=self.num_calibration_bins, - num_classes=self.num_classes, - ), - "cal/aECE": CalibrationError( - task=task, - adaptive=True, - num_bins=self.num_calibration_bins, - num_classes=self.num_classes, - ), - "sc/AURC": AURC(), - "sc/AUGRC": AUGRC(), - "sc/Cov@5Risk": CovAt5Risk(), - "sc/Risk@80Cov": RiskAt80Cov(), - }, - compute_groups=[ - ["cls/Acc"], - ["cls/Brier"], - ["cls/NLL"], - ["cal/ECE", "cal/aECE"], - ["sc/AURC", "sc/AUGRC", "sc/Cov@5Risk", "sc/Risk@80Cov"], - ], - ) + metrics_dict = { + "cls/Acc": Accuracy(task=task, num_classes=self.num_classes), + "cls/Brier": BrierScore(num_classes=self.num_classes), + "cls/NLL": CategoricalNLL(), + "cal/ECE": CalibrationError( + task=task, + num_bins=self.num_calibration_bins, + num_classes=self.num_classes, + ), + "cal/aECE": CalibrationError( + task=task, + adaptive=True, + num_bins=self.num_calibration_bins, + num_classes=self.num_classes, + ), + "sc/AURC": AURC(), + "sc/AUGRC": AUGRC(), + "sc/Cov@5Risk": CovAt5Risk(), + "sc/Risk@80Cov": 
RiskAt80Cov(), + } + groups = [ + ["cls/Acc"], + ["cls/Brier"], + ["cls/NLL"], + ["cal/ECE", "cal/aECE"], + ["sc/AURC", "sc/AUGRC", "sc/Cov@5Risk", "sc/Risk@80Cov"], + ] + if self.binary_cls: + metrics_dict |= { + "cls/AUROC": BinaryAUROC(), + "cls/AUPR": BinaryAveragePrecision(), + "cls/FRP95": FPR95(pos_label=1), + } + groups.extend([["cls/AUROC", "cls/AUPR"], ["cls/FRP95"]]) + + cls_metrics = MetricCollection(metrics_dict, compute_groups=groups) self.val_cls_metrics = cls_metrics.clone(prefix="val/") self.test_cls_metrics = cls_metrics.clone(prefix="test/") @@ -223,9 +230,9 @@ def _init_metrics(self) -> None: if self.eval_ood: ood_metrics = MetricCollection( { - "FPR95": FPR95(pos_label=1), "AUROC": BinaryAUROC(), "AUPR": BinaryAveragePrecision(), + "FPR95": FPR95(pos_label=1), }, compute_groups=[["AUROC", "AUPR"], ["FPR95"]], ) From a61b050078cd99ae6bc22d84f89e601d19ddbbc7 Mon Sep 17 00:00:00 2001 From: Olivier Date: Wed, 13 Nov 2024 16:01:06 +0100 Subject: [PATCH 08/30] :bug: Fix Brier Score for Binary cls. 
--- torch_uncertainty/metrics/classification/brier_score.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/torch_uncertainty/metrics/classification/brier_score.py b/torch_uncertainty/metrics/classification/brier_score.py index 43b12f2c..af3f490a 100644 --- a/torch_uncertainty/metrics/classification/brier_score.py +++ b/torch_uncertainty/metrics/classification/brier_score.py @@ -88,13 +88,10 @@ def update(self, probs: Tensor, target: Tensor) -> None: target (Tensor): A tensor of ground truth labels of shape (batch, num_classes) or (batch) """ - if target.ndim == 1: + if target.ndim == 1 and self.num_classes > 1: target = F.one_hot(target, self.num_classes) - if self.num_classes == 1: - probs = probs.unsqueeze(-1) - - if probs.ndim == 2: + if probs.ndim <= 2: batch_size = probs.size(0) elif probs.ndim == 3: batch_size = probs.size(0) From a71f4cfbda68292b487fa31faf1c7bb24f6ef36c Mon Sep 17 00:00:00 2001 From: Olivier Date: Wed, 13 Nov 2024 16:05:12 +0100 Subject: [PATCH 09/30] :sparkles: Add BCE with LS --- docs/source/api.rst | 1 + docs/source/references.rst | 11 +++++ torch_uncertainty/losses/__init__.py | 1 + torch_uncertainty/losses/classification.py | 47 ++++++++++++++++++++++ 4 files changed, 60 insertions(+) diff --git a/docs/source/api.rst b/docs/source/api.rst index 4f3e0177..5114f0e6 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -326,6 +326,7 @@ Losses ConfidencePenaltyLoss KLDiv ELBOLoss + BCEWithLogitsLossLS Post-Processing Methods ----------------------- diff --git a/docs/source/references.rst b/docs/source/references.rst index eb72cbee..b60f2d38 100644 --- a/docs/source/references.rst +++ b/docs/source/references.rst @@ -266,6 +266,17 @@ For the conflictual loss, consider citing: * Authors: *Mohammed Fellaji, Frédéric Pennerath, Brieuc Conan-Guez, and Miguel Couceiro* * Paper: `ArXiv 2024 `__. 
+Binary Cross-Entropy with Logits Loss with Label Smoothing +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For the binary cross-entropy with logits loss with label smoothing, consider citing: + +**Rethinking the Inception Architecture for Computer Vision** + +* Authors: *Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna* +* Paper: `CVPR 2016 `__. + + Metrics ------- diff --git a/torch_uncertainty/losses/__init__.py b/torch_uncertainty/losses/__init__.py index 2257e52f..7d8f10c9 100644 --- a/torch_uncertainty/losses/__init__.py +++ b/torch_uncertainty/losses/__init__.py @@ -1,6 +1,7 @@ # ruff: noqa: F401 from .bayesian import ELBOLoss, KLDiv from .classification import ( + BCEWithLogitsLossLS, ConfidencePenaltyLoss, ConflictualLoss, DECLoss, diff --git a/torch_uncertainty/losses/classification.py b/torch_uncertainty/losses/classification.py index 2d74d852..c2dfef43 100644 --- a/torch_uncertainty/losses/classification.py +++ b/torch_uncertainty/losses/classification.py @@ -336,3 +336,50 @@ def forward(self, x: Tensor, y: Tensor) -> Tensor: if self.reduction == "sum": return loss.sum() return loss + + +class BCEWithLogitsLossLS(nn.BCEWithLogitsLoss): + def __init__( + self, + weight: Tensor | None = None, + reduction: str = "mean", + label_smoothing: float = 0.0, + ) -> None: + """Binary Cross Entropy with Logits Loss with label smoothing. + + The original PyTorch implementation of the BCEWithLogitsLoss does not + support label smoothing. This implementation adds label smoothing to + the BCEWithLogitsLoss. + + Args: + weight (Tensor, optional): A manual rescaling weight given to the + loss of each batch element. If given, has to be a Tensor of size + "nbatch". Defaults to None. + reduction (str, optional): Specifies the reduction to apply to the + output: 'none' | 'mean' | 'sum'. 
'none': no reduction will be applied, + 'mean': the sum of the output will be divided by the number of + elements in the output, 'sum': the output will be summed. Defaults + to 'mean'. + label_smoothing (float, optional): The label smoothing factor. Defaults + to 0.0. + """ + super().__init__(weight, reduction) + self.label_smoothing = label_smoothing + + def forward(self, preds: Tensor, targets: Tensor) -> Tensor: + if self.label_smoothing == 0.0: + return super().forward(preds, targets) + targets = targets.float() + targets = ( + targets * (1 - self.label_smoothing) + self.label_smoothing / 2 + ) + loss = targets * F.logsigmoid(preds) + (1 - targets) * F.logsigmoid( + -preds + ) + if self.weight is not None: + loss = loss * self.weight + if self.reduction == "mean": + return -loss.mean() + if self.reduction == "sum": + return -loss.sum() + return -loss From cde29971e56712138b36b55bc515c37ab8c1a382 Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 20:40:06 +0100 Subject: [PATCH 10/30] :hammer: Rename UCI regression dm --- docs/source/api.rst | 2 +- .../regression/uci_datasets/deep_ensemble.py | 6 +++--- experiments/regression/uci_datasets/mlp.py | 4 ++-- tests/datamodules/test_uci_regression.py | 8 ++++---- torch_uncertainty/datamodules/__init__.py | 19 +++++++++++++------ .../datamodules/uci_regression.py | 2 +- 6 files changed, 24 insertions(+), 17 deletions(-) diff --git a/docs/source/api.rst b/docs/source/api.rst index 5114f0e6..b18bb813 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -387,7 +387,7 @@ Regression :nosignatures: :template: class.rst - UCIDataModule + UCIRegressionDataModule .. 
currentmodule:: torch_uncertainty.datamodules.segmentation diff --git a/experiments/regression/uci_datasets/deep_ensemble.py b/experiments/regression/uci_datasets/deep_ensemble.py index 6628e6e6..2a8bdc8f 100644 --- a/experiments/regression/uci_datasets/deep_ensemble.py +++ b/experiments/regression/uci_datasets/deep_ensemble.py @@ -2,10 +2,10 @@ from torch_uncertainty import cli_main, init_args from torch_uncertainty.baselines import DeepEnsemblesBaseline -from torch_uncertainty.datamodules import UCIDataModule +from torch_uncertainty.datamodules import UCIRegressionDataModule if __name__ == "__main__": - args = init_args(DeepEnsemblesBaseline, UCIDataModule) + args = init_args(DeepEnsemblesBaseline, UCIRegressionDataModule) if args.root == "./data/": root = Path(__file__).parent.absolute().parents[2] else: @@ -15,7 +15,7 @@ # datamodule args.root = str(root / "data") - dm = UCIDataModule(dataset_name="kin8nm", **vars(args)) + dm = UCIRegressionDataModule(dataset_name="kin8nm", **vars(args)) # model args.task = "regression" diff --git a/experiments/regression/uci_datasets/mlp.py b/experiments/regression/uci_datasets/mlp.py index 7c187673..54a9fafc 100644 --- a/experiments/regression/uci_datasets/mlp.py +++ b/experiments/regression/uci_datasets/mlp.py @@ -3,7 +3,7 @@ from torch_uncertainty import TULightningCLI from torch_uncertainty.baselines.regression import MLPBaseline -from torch_uncertainty.datamodules import UCIDataModule +from torch_uncertainty.datamodules import UCIRegressionDataModule class MLPCLI(TULightningCLI): @@ -12,7 +12,7 @@ def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: def cli_main() -> MLPCLI: - return MLPCLI(MLPBaseline, UCIDataModule) + return MLPCLI(MLPBaseline, UCIRegressionDataModule) if __name__ == "__main__": diff --git a/tests/datamodules/test_uci_regression.py b/tests/datamodules/test_uci_regression.py index 1297666c..aeda20ea 100644 --- a/tests/datamodules/test_uci_regression.py +++ 
b/tests/datamodules/test_uci_regression.py @@ -1,14 +1,14 @@ from functools import partial from tests._dummies.dataset import DummyRegressionDataset -from torch_uncertainty.datamodules import UCIDataModule +from torch_uncertainty.datamodules import UCIRegressionDataModule -class TestUCIDataModule: - """Testing the UCIDataModule datamodule class.""" +class TestUCIRegressionDataModule: + """Testing the UCIRegressionDataModule datamodule class.""" def test_uci_regression(self): - dm = UCIDataModule( + dm = UCIRegressionDataModule( dataset_name="kin8nm", root="./data/", batch_size=128 ) diff --git a/torch_uncertainty/datamodules/__init__.py b/torch_uncertainty/datamodules/__init__.py index 670dfc4c..b4c9797d 100644 --- a/torch_uncertainty/datamodules/__init__.py +++ b/torch_uncertainty/datamodules/__init__.py @@ -1,9 +1,16 @@ # ruff: noqa: F401 from .abstract import TUDataModule -from .classification.cifar10 import CIFAR10DataModule -from .classification.cifar100 import CIFAR100DataModule -from .classification.imagenet import ImageNetDataModule -from .classification.mnist import MNISTDataModule -from .classification.tiny_imagenet import TinyImageNetDataModule +from .classification import ( + BankMarketingDataModule, + CIFAR10DataModule, + CIFAR100DataModule, + Dota2GamesDataModule, + HTRU2DataModule, + ImageNetDataModule, + MNISTDataModule, + OnlineShoppersDataModule, + SpamBaseDataModule, + TinyImageNetDataModule, +) from .segmentation import CamVidDataModule, CityscapesDataModule -from .uci_regression import UCIDataModule +from .uci_regression import UCIRegressionDataModule diff --git a/torch_uncertainty/datamodules/uci_regression.py b/torch_uncertainty/datamodules/uci_regression.py index a5cbe8af..4d1b304e 100644 --- a/torch_uncertainty/datamodules/uci_regression.py +++ b/torch_uncertainty/datamodules/uci_regression.py @@ -9,7 +9,7 @@ from .abstract import TUDataModule -class UCIDataModule(TUDataModule): +class UCIRegressionDataModule(TUDataModule): training_task = 
"regression" def __init__( From 60e76440e11def5f15048165c27210b75a6b839d Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 20:41:22 +0100 Subject: [PATCH 11/30] :fire: Remove useless code line --- torch_uncertainty/datamodules/classification/cifar10.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/torch_uncertainty/datamodules/classification/cifar10.py b/torch_uncertainty/datamodules/classification/cifar10.py index f33d4a01..bda1a948 100644 --- a/torch_uncertainty/datamodules/classification/cifar10.py +++ b/torch_uncertainty/datamodules/classification/cifar10.py @@ -77,7 +77,6 @@ def __init__( persistent_workers=persistent_workers, ) - self.val_split = val_split self.num_dataloaders = num_dataloaders self.eval_ood = eval_ood self.eval_shift = eval_shift @@ -174,7 +173,6 @@ def setup(self, stage: Literal["fit", "test"] | None = None) -> None: self.val_split, self.test_transform, ) - else: self.train = full self.val = self.dataset( From 3da7ce3561df254a3e63b669e843655a61a93d87 Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 20:42:05 +0100 Subject: [PATCH 12/30] :sparkles: Add UCI cls datasets --- .../datasets/classification/uci/__init__.py | 6 + .../classification/uci/bank_marketing.py | 99 ++++++++++++++ .../classification/uci/dota2_games.py | 100 ++++++++++++++ .../datasets/classification/uci/htru2.py | 70 ++++++++++ .../classification/uci/online_shoppers.py | 74 ++++++++++ .../datasets/classification/uci/spam_base.py | 70 ++++++++++ .../classification/uci/uci_classification.py | 129 ++++++++++++++++++ 7 files changed, 548 insertions(+) create mode 100644 torch_uncertainty/datasets/classification/uci/__init__.py create mode 100644 torch_uncertainty/datasets/classification/uci/bank_marketing.py create mode 100644 torch_uncertainty/datasets/classification/uci/dota2_games.py create mode 100644 torch_uncertainty/datasets/classification/uci/htru2.py create mode 100644 torch_uncertainty/datasets/classification/uci/online_shoppers.py create 
mode 100644 torch_uncertainty/datasets/classification/uci/spam_base.py create mode 100644 torch_uncertainty/datasets/classification/uci/uci_classification.py diff --git a/torch_uncertainty/datasets/classification/uci/__init__.py b/torch_uncertainty/datasets/classification/uci/__init__.py new file mode 100644 index 00000000..96f9894a --- /dev/null +++ b/torch_uncertainty/datasets/classification/uci/__init__.py @@ -0,0 +1,6 @@ +# ruff: noqa: F401 +from .bank_marketing import BankMarketing +from .dota2_games import Dota2Games +from .htru2 import HTRU2 +from .online_shoppers import OnlineShoppers +from .spam_base import SpamBase diff --git a/torch_uncertainty/datasets/classification/uci/bank_marketing.py b/torch_uncertainty/datasets/classification/uci/bank_marketing.py new file mode 100644 index 00000000..9f693d4f --- /dev/null +++ b/torch_uncertainty/datasets/classification/uci/bank_marketing.py @@ -0,0 +1,99 @@ +import logging +from collections.abc import Callable +from pathlib import Path + +import numpy as np +import pandas as pd +import torch +from torchvision.datasets.utils import ( + download_and_extract_archive, + extract_archive, +) + +from .uci_classification import UCIClassificationDataset + + +class BankMarketing(UCIClassificationDataset): + """The bank Marketing UCI classification dataset. + + Args: + root (str): Root directory of the datasets. + train (bool, optional): If True, creates dataset from training set, + otherwise creates from test set. + transform (callable, optional): A function/transform that takes in a + numpy array and returns a transformed version. + target_transform (callable, optional): A function/transform that takes + in the target and transforms it. + download (bool, optional): If true, downloads the dataset from the + internet and puts it in root directory. If dataset is already + downloaded, it is not downloaded again. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. 
+ + Note - License: + The licenses of the datasets may differ from TorchUncertainty's + license. Check before use. + + """ + + md5_zip = "3a3c6c4189975ea1f3040dbd60ad106c" + url = "https://archive.ics.uci.edu/static/public/222/bank+marketing.zip" + dataset_name = "bank+marketing" + filename = "bank-additional-full.csv" + num_features = 62 + + def __init__( + self, + root: Path | str, + transform: Callable | None = None, + target_transform: Callable | None = None, + binary: bool = True, + download: bool = False, + train: bool = True, + test_split: float = 0.2, + split_seed: int = 21893027, + ) -> None: + super().__init__( + root, + transform, + target_transform, + binary, + download, + train, + test_split, + split_seed, + ) + + def download(self) -> None: + """Download and extract dataset.""" + if self._check_integrity(): + logging.info("Files already downloaded and verified") + return + download_and_extract_archive( + self.url, + download_root=self.root, + filename="bank+marketing.zip", + md5=self.md5_zip, + ) + extract_archive( + self.root / "bank-additional.zip", self.root / "bank-marketing" + ) + + def _make_dataset(self) -> None: + """Create dataset from extracted files.""" + data = pd.read_csv( + self.root / "bank-marketing" / "bank-additional" / self.filename, + sep=";", + ) + data["y"] = np.where(data["y"] == "yes", 1, 0) + self.targets = torch.as_tensor(data["y"].values, dtype=torch.long) + + self.data = data.drop(columns=["y"]) + categorical_columns = self.data.select_dtypes(include="object").columns + for col in categorical_columns: + if self.data[col].nunique() == 2: + self.data[col] = np.where(self.data[col] == "yes", 1, 0) + self.data = torch.as_tensor( + pd.get_dummies(self.data).astype(float).values, dtype=torch.float32 + ) + self.num_features = self.data.shape[1] diff --git a/torch_uncertainty/datasets/classification/uci/dota2_games.py b/torch_uncertainty/datasets/classification/uci/dota2_games.py new file mode 100644 index 00000000..3daf57eb --- 
/dev/null +++ b/torch_uncertainty/datasets/classification/uci/dota2_games.py @@ -0,0 +1,100 @@ +import logging +from collections.abc import Callable +from pathlib import Path + +import numpy as np +import pandas as pd +import torch +from torchvision.datasets.utils import download_and_extract_archive + +from .uci_classification import UCIClassificationDataset + + +class Dota2Games(UCIClassificationDataset): + """The Dota2 Games UCI classification dataset. + + Args: + root (str): Root directory of the datasets. + train (bool, optional): If True, creates dataset from training set, + otherwise creates from test set. + transform (callable, optional): A function/transform that takes in a + numpy array and returns a transformed version. + target_transform (callable, optional): A function/transform that takes + in the target and transforms it. + download (bool, optional): If true, downloads the dataset from the + internet and puts it in root directory. If dataset is already + downloaded, it is not downloaded again. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. + + Note - License: + The licenses of the datasets may differ from TorchUncertainty's + license. Check before use. 
+ + """ + + md5_zip = "896623c082b062f56b9c49c6c1fc0bf7" + url = ( + "https://archive.ics.uci.edu/static/public/367/dota2+games+results.zip" + ) + dataset_name = "dota2+games+results" + filename = "dota2Train.csv" + num_features = 116 + need_split = False + + def __init__( + self, + root: Path | str, + transform: Callable | None = None, + target_transform: Callable | None = None, + binary: bool = True, + download: bool = False, + train: bool = True, + test_split: float = 0.2, + split_seed: int = 21893027, + ) -> None: + super().__init__( + root, + transform, + target_transform, + binary, + download, + train, + test_split, + split_seed, + ) + + def download(self) -> None: + """Download and extract dataset.""" + if self._check_integrity(): + logging.info("Files already downloaded and verified") + return + download_and_extract_archive( + self.url, + download_root=self.root, + extract_root=self.root / "dota2_games", + filename="dota2+games+results.zip", + md5=self.md5_zip, + ) + + def _make_dataset(self) -> None: + """Create dataset from extracted files.""" + path = ( + self.root + / "dota2_games" + / ("dota2Train.csv" if self.train else "dota2Test.csv") + ) + + data = pd.read_csv(path, sep=",", header=None) + data[0] = np.where(data[0] == 1, 1, 0) + self.targets = torch.as_tensor(data[0].values, dtype=torch.long) + + self.data = data.drop(columns=[0]) + categorical_columns = self.data.select_dtypes(include="object").columns + for col in categorical_columns: + if self.data[col].nunique() == 2: + self.data[col] = np.where(self.data[col] == "yes", 1, 0) + self.data = torch.as_tensor( + pd.get_dummies(self.data).astype(float).values, dtype=torch.float32 + ) + self.num_features = self.data.shape[1] diff --git a/torch_uncertainty/datasets/classification/uci/htru2.py b/torch_uncertainty/datasets/classification/uci/htru2.py new file mode 100644 index 00000000..b161c720 --- /dev/null +++ b/torch_uncertainty/datasets/classification/uci/htru2.py @@ -0,0 +1,70 @@ +from 
collections.abc import Callable +from pathlib import Path + +import pandas as pd +import torch + +from .uci_classification import UCIClassificationDataset + + +class HTRU2(UCIClassificationDataset): + """The HTRU2 UCI classification dataset. + + Args: + root (str): Root directory of the datasets. + train (bool, optional): If True, creates dataset from training set, + otherwise creates from test set. + transform (callable, optional): A function/transform that takes in a + numpy array and returns a transformed version. + target_transform (callable, optional): A function/transform that takes + in the target and transforms it. + download (bool, optional): If true, downloads the dataset from the + internet and puts it in root directory. If dataset is already + downloaded, it is not downloaded again. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. + + Note - License: + The licenses of the datasets may differ from TorchUncertainty's + license. Check before use. 
+ + """ + + md5_zip = "1cfbf71c604debc06dedcbb6c1ccb43f" + url = "https://archive.ics.uci.edu/static/public/372/htru2.zip" + dataset_name = "htru2" + filename = "HTRU_2.csv" + num_features = 8 + + def __init__( + self, + root: Path | str, + transform: Callable | None = None, + target_transform: Callable | None = None, + binary: bool = True, + download: bool = False, + train: bool = True, + test_split: float = 0.2, + split_seed: int = 21893027, + ) -> None: + super().__init__( + root, + transform, + target_transform, + binary, + download, + train, + test_split, + split_seed, + ) + + def _make_dataset(self) -> None: + """Create dataset from extracted files.""" + data = pd.read_csv( + self.root / self.dataset_name / self.filename, sep=",", header=None + ) + self.targets = torch.as_tensor(data[8].values, dtype=torch.long) + self.data = torch.as_tensor( + data.drop(columns=[8]).values, dtype=torch.float32 + ) + self.num_features = self.data.shape[1] diff --git a/torch_uncertainty/datasets/classification/uci/online_shoppers.py b/torch_uncertainty/datasets/classification/uci/online_shoppers.py new file mode 100644 index 00000000..5a8b38a0 --- /dev/null +++ b/torch_uncertainty/datasets/classification/uci/online_shoppers.py @@ -0,0 +1,74 @@ +from collections.abc import Callable +from pathlib import Path + +import pandas as pd +import torch + +from .uci_classification import UCIClassificationDataset + + +class OnlineShoppers(UCIClassificationDataset): + """The Online Shoppers Intention UCI classification dataset. + + Args: + root (str): Root directory of the datasets. + train (bool, optional): If True, creates dataset from training set, + otherwise creates from test set. + transform (callable, optional): A function/transform that takes in a + numpy array and returns a transformed version. + target_transform (callable, optional): A function/transform that takes + in the target and transforms it. 
+ download (bool, optional): If true, downloads the dataset from the + internet and puts it in root directory. If dataset is already + downloaded, it is not downloaded again. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. + + Note - License: + The licenses of the datasets may differ from TorchUncertainty's + license. Check before use. + + """ + + md5_zip = "d835049e5f428f3b8cb8a6e6937f5537" + url = "https://archive.ics.uci.edu/static/public/468/online+shoppers+purchasing+intention+dataset.zip" + dataset_name = "online_shoppers" + filename = "online_shoppers_intention.csv" + num_features = 28 + + def __init__( + self, + root: Path | str, + transform: Callable | None = None, + target_transform: Callable | None = None, + binary: bool = True, + download: bool = False, + train: bool = True, + test_split: float = 0.2, + split_seed: int = 21893027, + ) -> None: + super().__init__( + root, + transform, + target_transform, + binary, + download, + train, + test_split, + split_seed, + ) + + def _make_dataset(self) -> None: + """Create dataset from extracted files.""" + data = pd.read_csv( + self.root / self.dataset_name / self.filename, + sep=",", + true_values=["TRUE"], + false_values=["FALSE"], + ) + self.targets = torch.as_tensor(data["Revenue"].values, dtype=torch.long) + + data = pd.get_dummies(data).astype(float) + data = data.drop(columns=["Revenue"]) + self.data = torch.as_tensor(data.values, dtype=torch.float32) + self.num_features = self.data.shape[1] diff --git a/torch_uncertainty/datasets/classification/uci/spam_base.py b/torch_uncertainty/datasets/classification/uci/spam_base.py new file mode 100644 index 00000000..159b5416 --- /dev/null +++ b/torch_uncertainty/datasets/classification/uci/spam_base.py @@ -0,0 +1,70 @@ +from collections.abc import Callable +from pathlib import Path + +import pandas as pd +import torch + +from .uci_classification import UCIClassificationDataset + + +class SpamBase(UCIClassificationDataset): 
+ """The SpamBase UCI classification dataset. + + Args: + root (str): Root directory of the datasets. + train (bool, optional): If True, creates dataset from training set, + otherwise creates from test set. + transform (callable, optional): A function/transform that takes in a + numpy array and returns a transformed version. + target_transform (callable, optional): A function/transform that takes + in the target and transforms it. + download (bool, optional): If true, downloads the dataset from the + internet and puts it in root directory. If dataset is already + downloaded, it is not downloaded again. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. + + Note - License: + The licenses of the datasets may differ from TorchUncertainty's + license. Check before use. + + """ + + md5_zip = "6159c57c5571b3c20218e32fc94e8e91" + url = "https://archive.ics.uci.edu/static/public/94/spambase.zip" + dataset_name = "spambase" + filename = "spambase.data" + num_features = 57 + + def __init__( + self, + root: Path | str, + transform: Callable | None = None, + target_transform: Callable | None = None, + binary: bool = True, + download: bool = False, + train: bool = True, + test_split: float = 0.2, + split_seed: int = 21893027, + ) -> None: + super().__init__( + root, + transform, + target_transform, + binary, + download, + train, + test_split, + split_seed, + ) + + def _make_dataset(self) -> None: + """Create dataset from extracted files.""" + data = pd.read_csv( + self.root / self.dataset_name / self.filename, sep=",", header=None + ) + self.targets = torch.as_tensor(data[57].values, dtype=torch.long) + self.data = torch.as_tensor( + data.drop(columns=[57]).values, dtype=torch.float32 + ) + self.num_features = self.data.shape[1] diff --git a/torch_uncertainty/datasets/classification/uci/uci_classification.py b/torch_uncertainty/datasets/classification/uci/uci_classification.py new file mode 100644 index 00000000..0439c49c --- /dev/null +++ 
b/torch_uncertainty/datasets/classification/uci/uci_classification.py @@ -0,0 +1,129 @@ +import logging +from abc import ABC, abstractmethod +from collections.abc import Callable +from pathlib import Path + +import torch +from torch import Generator +from torch.utils.data import Dataset +from torchvision.datasets.utils import ( + check_integrity, + download_and_extract_archive, +) + + +class UCIClassificationDataset(ABC, Dataset): + """The UCI classification dataset base class. + + Args: + root (str): Root directory of the datasets. + train (bool, optional): If True, creates dataset from training set, + otherwise creates from test set. + transform (callable, optional): A function/transform that takes in a + numpy array and returns a transformed version. + target_transform (callable, optional): A function/transform that takes + in the target and transforms it. + download (bool, optional): If true, downloads the dataset from the + internet and puts it in root directory. If dataset is already + downloaded, it is not downloaded again. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. + + Note - License: + The licenses of the datasets may differ from TorchUncertainty's + license. Check before use. 
+ + """ + + md5_zip: str = "" + url: str = "" + filename: str = "" + dataset_name: str = "" + need_split = True + apply_standardization = True + + def __init__( + self, + root: Path | str, + transform: Callable | None = None, + target_transform: Callable | None = None, + binary: bool = True, + download: bool = False, + train: bool = True, + test_split: float = 0.2, + split_seed: int = 21893027, + ) -> None: + super().__init__() + self.root = Path(root) + self.train = train + self.transform = transform + self.target_transform = target_transform + + if download: + self.download() + + self._make_dataset() + if self.apply_standardization: + self._compute_statistics() + self._standardize() + + if self.need_split: + self.gen = Generator().manual_seed(split_seed) + + part = 1 - test_split if train else test_split + self.split_idx = torch.ones(len(self)).multinomial( + num_samples=int(part * len(self)), + replacement=False, + generator=self.gen, + ) + self.data = self.data[self.split_idx] + self.targets = self.targets[self.split_idx] + if not binary: + self.targets = torch.nn.functional.one_hot( + self.targets, num_classes=2 + ) + + def __len__(self) -> int: + """Get the length of the dataset.""" + return self.data.shape[0] + + def _check_integrity(self) -> bool: + """Check the integrity of the dataset(s).""" + return check_integrity( + self.root / Path(self.dataset_name + ".zip"), + self.md5_zip, + ) + + def _standardize(self) -> None: + self.data = (self.data - self.data_mean) / self.data_std + + def _compute_statistics(self) -> None: + self.data_mean = self.data.mean(dim=0) + self.data_std = self.data.std(dim=0) + self.data_std[self.data_std == 0] = 1 + + def download(self) -> None: + """Download and extract dataset.""" + if self._check_integrity(): + logging.info("Files already downloaded and verified") + return + download_and_extract_archive( + self.url, + download_root=self.root / self.dataset_name, + filename=self.filename + ".zip", + md5=self.md5_zip, + ) + + 
@abstractmethod + def _make_dataset(self) -> None: + """Create dataset from extracted files.""" + + def __getitem__(self, index: int) -> tuple[torch.Tensor, torch.Tensor]: + """Get sample and target for a given index.""" + data = self.data[index, :] + if self.transform is not None: + data = self.transform(data) + target = self.targets[index] + if self.target_transform is not None: + target = self.target_transform(target) + return data, target From 0ef69cea6e7a483e7dbe8db62aeef057e0faf019 Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 20:42:53 +0100 Subject: [PATCH 13/30] :sparkles: Add UCI cls dm & forgotten init --- .../datamodules/classification/__init__.py | 13 +++ .../classification/uci/__init__.py | 7 ++ .../classification/uci/bank_marketing.py | 48 ++++++++++ .../classification/uci/dota2_games.py | 48 ++++++++++ .../datamodules/classification/uci/htru2.py | 48 ++++++++++ .../classification/uci/online_shoppers.py | 48 ++++++++++ .../classification/uci/spam_base.py | 48 ++++++++++ .../classification/uci/uci_classification.py | 95 +++++++++++++++++++ 8 files changed, 355 insertions(+) create mode 100644 torch_uncertainty/datamodules/classification/__init__.py create mode 100644 torch_uncertainty/datamodules/classification/uci/__init__.py create mode 100644 torch_uncertainty/datamodules/classification/uci/bank_marketing.py create mode 100644 torch_uncertainty/datamodules/classification/uci/dota2_games.py create mode 100644 torch_uncertainty/datamodules/classification/uci/htru2.py create mode 100644 torch_uncertainty/datamodules/classification/uci/online_shoppers.py create mode 100644 torch_uncertainty/datamodules/classification/uci/spam_base.py create mode 100644 torch_uncertainty/datamodules/classification/uci/uci_classification.py diff --git a/torch_uncertainty/datamodules/classification/__init__.py b/torch_uncertainty/datamodules/classification/__init__.py new file mode 100644 index 00000000..55cbc498 --- /dev/null +++ 
b/torch_uncertainty/datamodules/classification/__init__.py @@ -0,0 +1,13 @@ +# ruff: noqa: F401 +from .cifar10 import CIFAR10DataModule +from .cifar100 import CIFAR100DataModule +from .imagenet import ImageNetDataModule +from .mnist import MNISTDataModule +from .tiny_imagenet import TinyImageNetDataModule +from .uci import ( + BankMarketingDataModule, + Dota2GamesDataModule, + HTRU2DataModule, + OnlineShoppersDataModule, + SpamBaseDataModule, +) diff --git a/torch_uncertainty/datamodules/classification/uci/__init__.py b/torch_uncertainty/datamodules/classification/uci/__init__.py new file mode 100644 index 00000000..24d2cb38 --- /dev/null +++ b/torch_uncertainty/datamodules/classification/uci/__init__.py @@ -0,0 +1,7 @@ +# ruff: noqa: F401 +from .bank_marketing import BankMarketingDataModule +from .dota2_games import Dota2GamesDataModule +from .htru2 import HTRU2DataModule +from .online_shoppers import OnlineShoppersDataModule +from .spam_base import SpamBaseDataModule +from .uci_classification import UCIClassificationDataModule diff --git a/torch_uncertainty/datamodules/classification/uci/bank_marketing.py b/torch_uncertainty/datamodules/classification/uci/bank_marketing.py new file mode 100644 index 00000000..2f403205 --- /dev/null +++ b/torch_uncertainty/datamodules/classification/uci/bank_marketing.py @@ -0,0 +1,48 @@ +from pathlib import Path + +from torch_uncertainty_ls.datasets import BankMarketing + +from .uci_classification import UCIClassificationDataModule + + +class BankMarketingDataModule(UCIClassificationDataModule): + def __init__( + self, + root: str | Path, + batch_size: int, + val_split: float = 0.0, + test_split: float = 0.2, + num_workers: int = 1, + pin_memory: bool = True, + persistent_workers: bool = True, + binary: bool = True, + ) -> None: + """The Bank Marketing UCI classification datamodule. + + Args: + root (string): Root directory of the datasets. + batch_size (int): The batch size for training and testing. 
+ val_split (float, optional): Share of validation samples among the + non-test samples. Defaults to ``0``. + test_split (float, optional): Share of test samples. Defaults to ``0.2``. + num_workers (int, optional): How many subprocesses to use for data + loading. Defaults to ``1``. + pin_memory (bool, optional): Whether to pin memory in the GPU. Defaults + to ``True``. + persistent_workers (bool, optional): Whether to use persistent workers. + Defaults to ``True``. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. + + """ + super().__init__( + root=root, + dataset=BankMarketing, + batch_size=batch_size, + val_split=val_split, + test_split=test_split, + num_workers=num_workers, + pin_memory=pin_memory, + persistent_workers=persistent_workers, + binary=binary, + ) diff --git a/torch_uncertainty/datamodules/classification/uci/dota2_games.py b/torch_uncertainty/datamodules/classification/uci/dota2_games.py new file mode 100644 index 00000000..ef485349 --- /dev/null +++ b/torch_uncertainty/datamodules/classification/uci/dota2_games.py @@ -0,0 +1,48 @@ +from pathlib import Path + +from torch_uncertainty_ls.datasets import Dota2Games + +from .uci_classification import UCIClassificationDataModule + + +class Dota2GamesDataModule(UCIClassificationDataModule): + def __init__( + self, + root: str | Path, + batch_size: int, + val_split: float = 0.0, + test_split: float = 0.2, + num_workers: int = 1, + pin_memory: bool = True, + persistent_workers: bool = True, + binary: bool = True, + ) -> None: + """The Dota2 Games UCI classification datamodule. + + Args: + root (string): Root directory of the datasets. + batch_size (int): The batch size for training and testing. + val_split (float, optional): Share of validation samples among the + non-test samples. Defaults to ``0``. + test_split (float, optional): Share of test samples. Defaults to ``0.2``. + num_workers (int, optional): How many subprocesses to use for data + loading. 
Defaults to ``1``. + pin_memory (bool, optional): Whether to pin memory in the GPU. Defaults + to ``True``. + persistent_workers (bool, optional): Whether to use persistent workers. + Defaults to ``True``. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. + + """ + super().__init__( + root=root, + dataset=Dota2Games, + batch_size=batch_size, + val_split=val_split, + test_split=test_split, + num_workers=num_workers, + pin_memory=pin_memory, + persistent_workers=persistent_workers, + binary=binary, + ) diff --git a/torch_uncertainty/datamodules/classification/uci/htru2.py b/torch_uncertainty/datamodules/classification/uci/htru2.py new file mode 100644 index 00000000..fdef078f --- /dev/null +++ b/torch_uncertainty/datamodules/classification/uci/htru2.py @@ -0,0 +1,48 @@ +from pathlib import Path + +from torch_uncertainty_ls.datasets import HTRU2 + +from .uci_classification import UCIClassificationDataModule + + +class HTRU2DataModule(UCIClassificationDataModule): + def __init__( + self, + root: str | Path, + batch_size: int, + val_split: float = 0.0, + test_split: float = 0.2, + num_workers: int = 1, + pin_memory: bool = True, + persistent_workers: bool = True, + binary: bool = True, + ) -> None: + """The HTRU2 UCI classification datamodule. + + Args: + root (string): Root directory of the datasets. + batch_size (int): The batch size for training and testing. + val_split (float, optional): Share of validation samples among the + non-test samples. Defaults to ``0``. + test_split (float, optional): Share of test samples. Defaults to ``0.2``. + num_workers (int, optional): How many subprocesses to use for data + loading. Defaults to ``1``. + pin_memory (bool, optional): Whether to pin memory in the GPU. Defaults + to ``True``. + persistent_workers (bool, optional): Whether to use persistent workers. + Defaults to ``True``. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. 
+ + """ + super().__init__( + root=root, + dataset=HTRU2, + batch_size=batch_size, + val_split=val_split, + test_split=test_split, + num_workers=num_workers, + pin_memory=pin_memory, + persistent_workers=persistent_workers, + binary=binary, + ) diff --git a/torch_uncertainty/datamodules/classification/uci/online_shoppers.py b/torch_uncertainty/datamodules/classification/uci/online_shoppers.py new file mode 100644 index 00000000..8561b5cf --- /dev/null +++ b/torch_uncertainty/datamodules/classification/uci/online_shoppers.py @@ -0,0 +1,48 @@ +from pathlib import Path + +from torch_uncertainty_ls.datasets import OnlineShoppers + +from .uci_classification import UCIClassificationDataModule + + +class OnlineShoppersDataModule(UCIClassificationDataModule): + def __init__( + self, + root: str | Path, + batch_size: int, + val_split: float = 0.0, + test_split: float = 0.2, + num_workers: int = 1, + pin_memory: bool = True, + persistent_workers: bool = True, + binary: bool = True, + ) -> None: + """The online shoppers intention UCI classification datamodule. + + Args: + root (string): Root directory of the datasets. + batch_size (int): The batch size for training and testing. + val_split (float, optional): Share of validation samples among the + non-test samples. Defaults to ``0``. + test_split (float, optional): Share of test samples. Defaults to ``0.2``. + num_workers (int, optional): How many subprocesses to use for data + loading. Defaults to ``1``. + pin_memory (bool, optional): Whether to pin memory in the GPU. Defaults + to ``True``. + persistent_workers (bool, optional): Whether to use persistent workers. + Defaults to ``True``. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. 
+ + """ + super().__init__( + root=root, + dataset=OnlineShoppers, + batch_size=batch_size, + val_split=val_split, + test_split=test_split, + num_workers=num_workers, + pin_memory=pin_memory, + persistent_workers=persistent_workers, + binary=binary, + ) diff --git a/torch_uncertainty/datamodules/classification/uci/spam_base.py b/torch_uncertainty/datamodules/classification/uci/spam_base.py new file mode 100644 index 00000000..9bc2897e --- /dev/null +++ b/torch_uncertainty/datamodules/classification/uci/spam_base.py @@ -0,0 +1,48 @@ +from pathlib import Path + +from torch_uncertainty_ls.datasets import SpamBase + +from .uci_classification import UCIClassificationDataModule + + +class SpamBaseDataModule(UCIClassificationDataModule): + def __init__( + self, + root: str | Path, + batch_size: int, + val_split: float = 0.0, + test_split: float = 0.2, + num_workers: int = 1, + pin_memory: bool = True, + persistent_workers: bool = True, + binary: bool = True, + ) -> None: + """The Bank Marketing UCI classification datamodule. + + Args: + root (string): Root directory of the datasets. + batch_size (int): The batch size for training and testing. + val_split (float, optional): Share of validation samples among the + non-test samples. Defaults to ``0``. + test_split (float, optional): Share of test samples. Defaults to ``0.2``. + num_workers (int, optional): How many subprocesses to use for data + loading. Defaults to ``1``. + pin_memory (bool, optional): Whether to pin memory in the GPU. Defaults + to ``True``. + persistent_workers (bool, optional): Whether to use persistent workers. + Defaults to ``True``. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. 
+ + """ + super().__init__( + root=root, + dataset=SpamBase, + batch_size=batch_size, + val_split=val_split, + test_split=test_split, + num_workers=num_workers, + pin_memory=pin_memory, + persistent_workers=persistent_workers, + binary=binary, + ) diff --git a/torch_uncertainty/datamodules/classification/uci/uci_classification.py b/torch_uncertainty/datamodules/classification/uci/uci_classification.py new file mode 100644 index 00000000..40ed06a0 --- /dev/null +++ b/torch_uncertainty/datamodules/classification/uci/uci_classification.py @@ -0,0 +1,95 @@ +from pathlib import Path + +from torch.utils.data import Dataset + +from torch_uncertainty.datamodules.abstract import TUDataModule +from torch_uncertainty.utils import create_train_val_split + + +class UCIClassificationDataModule(TUDataModule): + training_task = "classification" + + def __init__( + self, + root: str | Path, + dataset: type[Dataset], + batch_size: int, + val_split: float = 0.0, + test_split: float = 0.2, + num_workers: int = 1, + pin_memory: bool = True, + persistent_workers: bool = True, + binary: bool = True, + ) -> None: + """The UCI classification datamodule base class. + + Args: + root (string): Root directory of the datasets. + dataset (type[Dataset]): The UCI classification dataset class. + batch_size (int): The batch size for training and testing. + val_split (float, optional): Share of validation samples among the + non-test samples. Defaults to ``0``. + test_split (float, optional): Share of test samples. Defaults to ``0.2``. + num_workers (int, optional): How many subprocesses to use for data + loading. Defaults to ``1``. + pin_memory (bool, optional): Whether to pin memory in the GPU. Defaults + to ``True``. + persistent_workers (bool, optional): Whether to use persistent workers. + Defaults to ``True``. + binary (bool, optional): Whether to use binary classification. Defaults + to ``True``. 
+ + """ + super().__init__( + root=root, + batch_size=batch_size, + val_split=val_split, + num_workers=num_workers, + pin_memory=pin_memory, + persistent_workers=persistent_workers, + ) + + self.dataset = dataset + self.test_split = test_split + self.binary = binary + + def prepare_data(self) -> None: + """Download the dataset.""" + self.dataset(root=self.root, download=True) + + # ruff: noqa: ARG002 + def setup(self, stage: str | None = None) -> None: + """Split the datasets into train, val, and test.""" + if stage == "fit" or stage is None: + full = self.dataset( + self.root, + train=True, + download=False, + binary=self.binary, + test_split=self.test_split, + ) + + if self.val_split: + self.train, self.val = create_train_val_split( + full, + self.val_split, + ) + else: + self.train = full + self.val = self.dataset( + self.root, + train=False, + download=False, + binary=self.binary, + test_split=self.test_split, + ) + if stage == "test" or stage is None: + self.test = self.dataset( + self.root, + train=False, + download=False, + binary=self.binary, + test_split=self.test_split, + ) + if stage not in ["fit", "test", None]: + raise ValueError(f"Stage {stage} is not supported.") From 24aef4dec6771439eb95db7a98f46348ec1b8ca5 Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 20:46:43 +0100 Subject: [PATCH 14/30] :fire: Remove torchinfo dep. 
--- pyproject.toml | 1 - tests/baselines/test_batched.py | 4 ---- tests/baselines/test_masked.py | 4 ---- tests/baselines/test_mc_dropout.py | 4 ---- tests/baselines/test_mimo.py | 4 ---- tests/baselines/test_packed.py | 8 -------- tests/baselines/test_standard.py | 8 -------- 7 files changed, 33 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0cb28608..eb4f3339 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,6 @@ dependencies = [ "torchvision>=0.16", "tensorboard", "einops", - "torchinfo", "huggingface-hub", "scikit-learn", "matplotlib", diff --git a/tests/baselines/test_batched.py b/tests/baselines/test_batched.py index ef208523..238427b4 100644 --- a/tests/baselines/test_batched.py +++ b/tests/baselines/test_batched.py @@ -1,6 +1,5 @@ import torch from torch import nn -from torchinfo import summary from torch_uncertainty.baselines.classification import ( ResNetBaseline, @@ -23,7 +22,6 @@ def test_batched_18(self): groups=1, ) - summary(net) _ = net(torch.rand(1, 3, 32, 32)) def test_batched_50(self): @@ -38,7 +36,6 @@ def test_batched_50(self): groups=1, ) - summary(net) _ = net(torch.rand(1, 3, 40, 40)) @@ -56,5 +53,4 @@ def test_batched(self): groups=1, ) - summary(net) _ = net(torch.rand(1, 3, 32, 32)) diff --git a/tests/baselines/test_masked.py b/tests/baselines/test_masked.py index 3fd48ebf..40c11ce7 100644 --- a/tests/baselines/test_masked.py +++ b/tests/baselines/test_masked.py @@ -1,7 +1,6 @@ import pytest import torch from torch import nn -from torchinfo import summary from torch_uncertainty.baselines.classification import ( ResNetBaseline, @@ -25,7 +24,6 @@ def test_masked_18(self): groups=1, ) - summary(net) _ = net(torch.rand(1, 3, 32, 32)) def test_masked_50(self): @@ -41,7 +39,6 @@ def test_masked_50(self): groups=1, ) - summary(net) _ = net(torch.rand(1, 3, 40, 40)) def test_masked_errors(self): @@ -87,5 +84,4 @@ def test_masked(self): groups=1, ) - summary(net) _ = net(torch.rand(1, 3, 32, 32)) diff --git 
a/tests/baselines/test_mc_dropout.py b/tests/baselines/test_mc_dropout.py index dca61c3c..d397f7f3 100644 --- a/tests/baselines/test_mc_dropout.py +++ b/tests/baselines/test_mc_dropout.py @@ -1,6 +1,5 @@ import torch from torch import nn -from torchinfo import summary from torch_uncertainty.baselines.classification import ( ResNetBaseline, @@ -24,7 +23,6 @@ def test_standard(self): style="cifar", groups=1, ) - summary(net) net(torch.rand(1, 3, 32, 32)) @@ -42,7 +40,6 @@ def test_standard(self): style="cifar", groups=1, ) - summary(net) net(torch.rand(1, 3, 32, 32)) @@ -61,7 +58,6 @@ def test_standard(self): groups=1, last_layer_dropout=True, ) - summary(net) net(torch.rand(1, 3, 32, 32)) net = VGGBaseline( diff --git a/tests/baselines/test_mimo.py b/tests/baselines/test_mimo.py index 18c83a08..57c7a4a4 100644 --- a/tests/baselines/test_mimo.py +++ b/tests/baselines/test_mimo.py @@ -1,6 +1,5 @@ import torch from torch import nn -from torchinfo import summary from torch_uncertainty.baselines.classification import ( ResNetBaseline, @@ -25,7 +24,6 @@ def test_mimo_50(self): groups=1, ).eval() - summary(net) _ = net(torch.rand(1, 3, 32, 32)) def test_mimo_18(self): @@ -42,7 +40,6 @@ def test_mimo_18(self): groups=2, ).eval() - summary(net) _ = net(torch.rand(1, 3, 40, 40)) @@ -62,5 +59,4 @@ def test_mimo(self): groups=1, ).eval() - summary(net) _ = net(torch.rand(1, 3, 32, 32)) diff --git a/tests/baselines/test_packed.py b/tests/baselines/test_packed.py index c8331119..49e02adf 100644 --- a/tests/baselines/test_packed.py +++ b/tests/baselines/test_packed.py @@ -1,7 +1,6 @@ import pytest import torch from torch import nn -from torchinfo import summary from torch_uncertainty.baselines.classification import ( ResNetBaseline, @@ -28,8 +27,6 @@ def test_packed_50(self): groups=1, ) - summary(net) - _ = net(torch.rand(1, 3, 32, 32)) def test_packed_18(self): @@ -46,7 +43,6 @@ def test_packed_18(self): groups=2, ) - summary(net) _ = net(torch.rand(1, 3, 40, 40)) def 
test_packed_exception(self): @@ -95,7 +91,6 @@ def test_packed(self): groups=1, ) - summary(net) _ = net(torch.rand(1, 3, 32, 32)) @@ -114,8 +109,6 @@ def test_packed(self): gamma=1, groups=1, ) - - summary(net) _ = net(torch.rand(2, 3, 32, 32)) @@ -133,5 +126,4 @@ def test_packed(self): alpha=2, gamma=1, ) - summary(net) _ = net(torch.rand(1, 3)) diff --git a/tests/baselines/test_standard.py b/tests/baselines/test_standard.py index 6db1c03b..f97a527c 100644 --- a/tests/baselines/test_standard.py +++ b/tests/baselines/test_standard.py @@ -1,7 +1,6 @@ import pytest import torch from torch import nn -from torchinfo import summary from torch_uncertainty.baselines.classification import ( ResNetBaseline, @@ -28,7 +27,6 @@ def test_standard(self): style="cifar", groups=1, ) - summary(net) _ = net(torch.rand(1, 3, 32, 32)) def test_errors(self): @@ -56,7 +54,6 @@ def test_standard(self): style="cifar", groups=1, ) - summary(net) _ = net(torch.rand(1, 3, 32, 32)) def test_errors(self): @@ -83,7 +80,6 @@ def test_standard(self): arch=11, groups=1, ) - summary(net) _ = net(torch.rand(1, 3, 32, 32)) def test_errors(self): @@ -109,9 +105,7 @@ def test_standard(self): version="std", hidden_dims=[1], ) - summary(net) _ = net(torch.rand(1, 3)) - for distribution in ["normal", "laplace", "nig"]: MLPBaseline( in_features=3, @@ -143,7 +137,6 @@ def test_standard(self): version="std", arch=0, ) - summary(net) _ = net(torch.rand(1, 3, 32, 32)) def test_errors(self): @@ -169,7 +162,6 @@ def test_standard(self): arch=50, separable=True, ).eval() - summary(net) _ = net(torch.rand(1, 3, 32, 32)) def test_errors(self): From 086213653373b76a66446d9349d63baa0ca66b9d Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 20:47:43 +0100 Subject: [PATCH 15/30] :heavy_minus_sign: Remove tensorboard dep. 
--- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index eb4f3339..59ffdbf1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,6 @@ dependencies = [ "timm", "lightning[pytorch-extra]>=2.0", "torchvision>=0.16", - "tensorboard", "einops", "huggingface-hub", "scikit-learn", From 1c7b8d60646b4e01ec04179109279188925f77fb Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 20:53:50 +0100 Subject: [PATCH 16/30] :heavy_minus_sign: Remove hf hub from necessary dep. --- pyproject.toml | 3 ++- torch_uncertainty/utils/hub.py | 25 ++++++++++++++++++++++--- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 59ffdbf1..d6c8ecb2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,6 @@ dependencies = [ "lightning[pytorch-extra]>=2.0", "torchvision>=0.16", "einops", - "huggingface-hub", "scikit-learn", "matplotlib", "numpy", @@ -52,6 +51,7 @@ image = [ ] tabular = ["pandas"] dev = [ + "huggingface-hub", "torch_uncertainty[image]", "ruff==0.6.9", "pytest-cov", @@ -71,6 +71,7 @@ all = [ "laplace-torch", "glest==0.0.1a1", "scipy", + "tensorboard", ] [project.urls] diff --git a/torch_uncertainty/utils/hub.py b/torch_uncertainty/utils/hub.py index 67fe81d3..73bd0f7f 100644 --- a/torch_uncertainty/utils/hub.py +++ b/torch_uncertainty/utils/hub.py @@ -1,10 +1,23 @@ +from importlib import util from pathlib import Path import torch import yaml -from huggingface_hub import hf_hub_download -from huggingface_hub.errors import EntryNotFoundError -from safetensors.torch import load_file + +if util.find_spec("safetensors"): + from safetensors.torch import load_file + + safetensors_installed = True +else: # coverage: ignore + safetensors_installed = False + +if util.find_spec("huggingface_hub"): + from huggingface_hub import hf_hub_download + from huggingface_hub.errors import EntryNotFoundError + + huggingface_hub_installed = True +else: # coverage: ignore + 
huggingface_hub_installed = False def load_hf( @@ -22,6 +35,12 @@ def load_hf( Note - License: TorchUncertainty's weights are released under the Apache 2.0 license. """ + if not huggingface_hub_installed: + raise ImportError( + "Please install huggingface_hub to use this function." + ) + if not safetensors_installed: + raise ImportError("Please install safetensors to use this function.") repo_id = f"torch-uncertainty/{weight_id}" # Load the weights From 7cd9ed9e85ed3f34e80bfac6e94d53a45d93c868 Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 20:57:14 +0100 Subject: [PATCH 17/30] :heavy_minus_sign: Remove sklearn hub from necessary dep. --- pyproject.toml | 2 +- torch_uncertainty/metrics/sparsification.py | 14 +++++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d6c8ecb2..59788722 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,6 @@ dependencies = [ "lightning[pytorch-extra]>=2.0", "torchvision>=0.16", "einops", - "scikit-learn", "matplotlib", "numpy", "rich>=10.2.2", @@ -51,6 +50,7 @@ image = [ ] tabular = ["pandas"] dev = [ + "scikit-learn", "huggingface-hub", "torch_uncertainty[image]", "ruff==0.6.9", diff --git a/torch_uncertainty/metrics/sparsification.py b/torch_uncertainty/metrics/sparsification.py index 1a55a92b..b0be6470 100644 --- a/torch_uncertainty/metrics/sparsification.py +++ b/torch_uncertainty/metrics/sparsification.py @@ -1,7 +1,16 @@ +from importlib import util + import matplotlib.pyplot as plt import numpy as np import torch -from sklearn.metrics import auc + +if util.find_spec("scikit-learn"): + from sklearn.metrics import auc + + sklearn_installed = True +else: # coverage: ignore + sklearn_installed = False + from torch import Tensor from torchmetrics.metric import Metric from torchmetrics.utilities.data import dim_zero_cat @@ -47,6 +56,9 @@ def __init__(self, **kwargs) -> None: self.add_state("scores", default=[], dist_reduce_fx="cat") 
self.add_state("errors", default=[], dist_reduce_fx="cat") + if not sklearn_installed: + raise ImportError("Please install scikit-learn to use AUSE.") + def update(self, scores: Tensor, errors: Tensor) -> None: """Store the scores and their associated errors for later computation. From 5702faeb8f4dc0299de2886f7a27b8e2383c513b Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 21:02:31 +0100 Subject: [PATCH 18/30] :bug: Fix small errors & make tests pass --- .../datamodules/classification/uci/bank_marketing.py | 2 +- .../datamodules/classification/uci/dota2_games.py | 2 +- torch_uncertainty/datamodules/classification/uci/htru2.py | 2 +- .../datamodules/classification/uci/online_shoppers.py | 2 +- .../datamodules/classification/uci/spam_base.py | 2 +- torch_uncertainty/datasets/classification/__init__.py | 7 +++++++ torch_uncertainty/metrics/sparsification.py | 2 +- 7 files changed, 13 insertions(+), 6 deletions(-) diff --git a/torch_uncertainty/datamodules/classification/uci/bank_marketing.py b/torch_uncertainty/datamodules/classification/uci/bank_marketing.py index 2f403205..9f07ce69 100644 --- a/torch_uncertainty/datamodules/classification/uci/bank_marketing.py +++ b/torch_uncertainty/datamodules/classification/uci/bank_marketing.py @@ -1,6 +1,6 @@ from pathlib import Path -from torch_uncertainty_ls.datasets import BankMarketing +from torch_uncertainty.datasets.classification import BankMarketing from .uci_classification import UCIClassificationDataModule diff --git a/torch_uncertainty/datamodules/classification/uci/dota2_games.py b/torch_uncertainty/datamodules/classification/uci/dota2_games.py index ef485349..6153e855 100644 --- a/torch_uncertainty/datamodules/classification/uci/dota2_games.py +++ b/torch_uncertainty/datamodules/classification/uci/dota2_games.py @@ -1,6 +1,6 @@ from pathlib import Path -from torch_uncertainty_ls.datasets import Dota2Games +from torch_uncertainty.datasets.classification import Dota2Games from .uci_classification import 
UCIClassificationDataModule diff --git a/torch_uncertainty/datamodules/classification/uci/htru2.py b/torch_uncertainty/datamodules/classification/uci/htru2.py index fdef078f..3dfdaf45 100644 --- a/torch_uncertainty/datamodules/classification/uci/htru2.py +++ b/torch_uncertainty/datamodules/classification/uci/htru2.py @@ -1,6 +1,6 @@ from pathlib import Path -from torch_uncertainty_ls.datasets import HTRU2 +from torch_uncertainty.datasets.classification import HTRU2 from .uci_classification import UCIClassificationDataModule diff --git a/torch_uncertainty/datamodules/classification/uci/online_shoppers.py b/torch_uncertainty/datamodules/classification/uci/online_shoppers.py index 8561b5cf..c5d24e11 100644 --- a/torch_uncertainty/datamodules/classification/uci/online_shoppers.py +++ b/torch_uncertainty/datamodules/classification/uci/online_shoppers.py @@ -1,6 +1,6 @@ from pathlib import Path -from torch_uncertainty_ls.datasets import OnlineShoppers +from torch_uncertainty.datasets.classification import OnlineShoppers from .uci_classification import UCIClassificationDataModule diff --git a/torch_uncertainty/datamodules/classification/uci/spam_base.py b/torch_uncertainty/datamodules/classification/uci/spam_base.py index 9bc2897e..868ab738 100644 --- a/torch_uncertainty/datamodules/classification/uci/spam_base.py +++ b/torch_uncertainty/datamodules/classification/uci/spam_base.py @@ -1,6 +1,6 @@ from pathlib import Path -from torch_uncertainty_ls.datasets import SpamBase +from torch_uncertainty.datasets.classification import SpamBase from .uci_classification import UCIClassificationDataModule diff --git a/torch_uncertainty/datasets/classification/__init__.py b/torch_uncertainty/datasets/classification/__init__.py index 9bce03a1..25983cf4 100644 --- a/torch_uncertainty/datasets/classification/__init__.py +++ b/torch_uncertainty/datasets/classification/__init__.py @@ -11,3 +11,10 @@ from .mnist_c import MNISTC from .not_mnist import NotMNIST from .openimage_o import 
OpenImageO +from .uci import ( + HTRU2, + BankMarketing, + Dota2Games, + OnlineShoppers, + SpamBase, +) diff --git a/torch_uncertainty/metrics/sparsification.py b/torch_uncertainty/metrics/sparsification.py index b0be6470..8977b7e0 100644 --- a/torch_uncertainty/metrics/sparsification.py +++ b/torch_uncertainty/metrics/sparsification.py @@ -4,7 +4,7 @@ import numpy as np import torch -if util.find_spec("scikit-learn"): +if util.find_spec("sklearn"): from sklearn.metrics import auc sklearn_installed = True From d9c039b01ee8417bf8b75afe144f6f76c7ca2e7f Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 21:10:17 +0100 Subject: [PATCH 19/30] :white_check_mark: Add some tests and improve coverage --- pyproject.toml | 2 +- tests/losses/test_classification.py | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 59788722..605a37b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -168,5 +168,5 @@ include = ["*/torch-uncertainty/*"] omit = ["*/tests/*", "*/datasets/*"] [tool.coverage.report] -exclude_lines = ["coverage: ignore", "raise NotImplementedError"] +exclude_lines = ["coverage: ignore", "raise NotImplementedError", "raise ImportError"] ignore_errors = true diff --git a/tests/losses/test_classification.py b/tests/losses/test_classification.py index fa79e562..c0101237 100644 --- a/tests/losses/test_classification.py +++ b/tests/losses/test_classification.py @@ -2,6 +2,7 @@ import torch from torch_uncertainty.losses import ( + BCEWithLogitsLossLS, ConfidencePenaltyLoss, ConflictualLoss, DECLoss, @@ -131,3 +132,15 @@ def test_failures(self): ValueError, match="is not a valid value for reduction." 
): FocalLoss(gamma=1, reduction="median") + + +class TestBCEWithLogitsLossLS: + """Testing the BCEWithLogitsLossLS class.""" + + def test_main(self): + loss = BCEWithLogitsLossLS(reduction="sum", label_smoothing=0.1) + loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0])) + loss = BCEWithLogitsLossLS(label_smoothing=0.6) + loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0])) + loss = BCEWithLogitsLossLS(reduction="none") + loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0])) From 5e2770628253fd076da66035471df325ad8749ac Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 21:15:06 +0100 Subject: [PATCH 20/30] :white_check_mark: Add ds test and fix loss test --- tests/datamodules/classification/test_uci.py | 18 ++++++++++++++++++ tests/losses/test_classification.py | 6 +++--- 2 files changed, 21 insertions(+), 3 deletions(-) create mode 100644 tests/datamodules/classification/test_uci.py diff --git a/tests/datamodules/classification/test_uci.py b/tests/datamodules/classification/test_uci.py new file mode 100644 index 00000000..0d02f9a1 --- /dev/null +++ b/tests/datamodules/classification/test_uci.py @@ -0,0 +1,18 @@ +from torch_uncertainty.datamodules.classification import HTRU2DataModule + + +class TestHTRU2DataModule: + """Testing the HTRU2DataModule datamodule class.""" + + def test_htru2(self): + dm = HTRU2DataModule(root="./data/", batch_size=128) + + dm.prepare_data() + dm.setup() + + dm.train_dataloader() + dm.val_dataloader() + dm.test_dataloader() + + dm.setup("test") + dm.test_dataloader() diff --git a/tests/losses/test_classification.py b/tests/losses/test_classification.py index c0101237..bd966a93 100644 --- a/tests/losses/test_classification.py +++ b/tests/losses/test_classification.py @@ -139,8 +139,8 @@ class TestBCEWithLogitsLossLS: def test_main(self): loss = BCEWithLogitsLossLS(reduction="sum", label_smoothing=0.1) - loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0])) + loss(torch.tensor([0.0]), torch.tensor([0])) loss = 
BCEWithLogitsLossLS(label_smoothing=0.6) - loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0])) + loss(torch.tensor([0.0]), torch.tensor([0])) loss = BCEWithLogitsLossLS(reduction="none") - loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0])) + loss(torch.tensor([0.0]), torch.tensor([0])) From 43d099c77788033a0103b2695b94d15aae5cc985 Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 21:16:06 +0100 Subject: [PATCH 21/30] :fire: Delete useless dataset tests --- tests/datasets/__init__.py | 0 tests/datasets/segmentation/__init__.py | 0 tests/datasets/segmentation/test_camvid.py | 11 ------- .../datasets/segmentation/test_cityscapes.py | 11 ------- tests/datasets/test_cifar.py | 31 ------------------- tests/datasets/test_imagenet.py | 31 ------------------- tests/datasets/test_kitti.py | 11 ------- tests/datasets/test_muad.py | 11 ------- tests/datasets/test_regression_toy.py | 10 ------ tests/datasets/test_tiny_imagenet.py | 11 ------- 10 files changed, 127 deletions(-) delete mode 100644 tests/datasets/__init__.py delete mode 100644 tests/datasets/segmentation/__init__.py delete mode 100644 tests/datasets/segmentation/test_camvid.py delete mode 100644 tests/datasets/segmentation/test_cityscapes.py delete mode 100644 tests/datasets/test_cifar.py delete mode 100644 tests/datasets/test_imagenet.py delete mode 100644 tests/datasets/test_kitti.py delete mode 100644 tests/datasets/test_muad.py delete mode 100644 tests/datasets/test_regression_toy.py delete mode 100644 tests/datasets/test_tiny_imagenet.py diff --git a/tests/datasets/__init__.py b/tests/datasets/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/datasets/segmentation/__init__.py b/tests/datasets/segmentation/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/datasets/segmentation/test_camvid.py b/tests/datasets/segmentation/test_camvid.py deleted file mode 100644 index 2777ad4d..00000000 --- a/tests/datasets/segmentation/test_camvid.py +++ 
/dev/null @@ -1,11 +0,0 @@ -import pytest - -from torch_uncertainty.datasets.segmentation import CamVid - - -class TestCamVid: - """Testing the CamVid dataset class.""" - - def test_nodataset(self): - with pytest.raises(RuntimeError): - _ = CamVid("./.data") diff --git a/tests/datasets/segmentation/test_cityscapes.py b/tests/datasets/segmentation/test_cityscapes.py deleted file mode 100644 index c9b9e6f5..00000000 --- a/tests/datasets/segmentation/test_cityscapes.py +++ /dev/null @@ -1,11 +0,0 @@ -import pytest - -from torch_uncertainty.datasets.segmentation import Cityscapes - - -class TestCityscapes: - """Testing the Cityscapes dataset class.""" - - def test_nodataset(self): - with pytest.raises(RuntimeError): - _ = Cityscapes("./.data") diff --git a/tests/datasets/test_cifar.py b/tests/datasets/test_cifar.py deleted file mode 100644 index a8a46a4f..00000000 --- a/tests/datasets/test_cifar.py +++ /dev/null @@ -1,31 +0,0 @@ -import pytest - -from torch_uncertainty.datasets.classification import ( - CIFAR10C, - CIFAR10H, - CIFAR100C, -) - - -class TestCIFAR10C: - """Testing the CIFAR10C dataset class.""" - - def test_nodataset(self): - with pytest.raises(RuntimeError): - _ = CIFAR10C("./.data") - - -class TestCIFAR100C: - """Testing the CIFAR100C dataset class.""" - - def test_nodataset(self): - with pytest.raises(RuntimeError): - _ = CIFAR100C("./.data") - - -class TestCIFAR10H: - """Testing the CIFAR10H dataset class.""" - - def test_nodataset_nodownload(self): - with pytest.raises(RuntimeError): - _ = CIFAR10H("./.data", download=False) diff --git a/tests/datasets/test_imagenet.py b/tests/datasets/test_imagenet.py deleted file mode 100644 index aece9021..00000000 --- a/tests/datasets/test_imagenet.py +++ /dev/null @@ -1,31 +0,0 @@ -import pytest - -from torch_uncertainty.datasets.classification import ( - ImageNetA, - ImageNetO, - ImageNetR, -) - - -class TestImageNetA: - """Testing the ImageNetA dataset class.""" - - def test_nodataset(self): - with 
pytest.raises(RuntimeError): - _ = ImageNetA("./.data") - - -class TestImageNetO: - """Testing the ImageNetO dataset class.""" - - def test_nodataset(self): - with pytest.raises(RuntimeError): - _ = ImageNetO("./.data") - - -class TestImageNetR: - """Testing the ImageNetR dataset class.""" - - def test_nodataset(self): - with pytest.raises(RuntimeError): - _ = ImageNetR("./.data") diff --git a/tests/datasets/test_kitti.py b/tests/datasets/test_kitti.py deleted file mode 100644 index 9afd4f4f..00000000 --- a/tests/datasets/test_kitti.py +++ /dev/null @@ -1,11 +0,0 @@ -import pytest - -from torch_uncertainty.datasets import KITTIDepth - - -class TestKITTIDepth: - """Testing the KITTIDepth dataset class.""" - - def test_nodataset(self): - with pytest.raises(FileNotFoundError): - _ = KITTIDepth("./.data", split="train") diff --git a/tests/datasets/test_muad.py b/tests/datasets/test_muad.py deleted file mode 100644 index 3a431f3f..00000000 --- a/tests/datasets/test_muad.py +++ /dev/null @@ -1,11 +0,0 @@ -import pytest - -from torch_uncertainty.datasets import MUAD - - -class TestMUAD: - """Testing the MUAD dataset class.""" - - def test_nodataset(self): - with pytest.raises(FileNotFoundError): - _ = MUAD("./.data", split="train") diff --git a/tests/datasets/test_regression_toy.py b/tests/datasets/test_regression_toy.py deleted file mode 100644 index 1b6f7bda..00000000 --- a/tests/datasets/test_regression_toy.py +++ /dev/null @@ -1,10 +0,0 @@ -from torch_uncertainty.datasets.regression.toy import Cubic - - -class TestCubic: - """Testing the Cubic dataset class.""" - - def test_main(self): - ds = Cubic(num_samples=10) - _ = ds[9] - _ = len(ds) diff --git a/tests/datasets/test_tiny_imagenet.py b/tests/datasets/test_tiny_imagenet.py deleted file mode 100644 index b29da6a3..00000000 --- a/tests/datasets/test_tiny_imagenet.py +++ /dev/null @@ -1,11 +0,0 @@ -import pytest - -from torch_uncertainty.datasets.classification import TinyImageNet - - -class TestTinyImageNet: - 
"""Testing the TinyImageNet dataset class.""" - - def test_nodataset(self): - with pytest.raises(FileNotFoundError): - _ = TinyImageNet("./.data") From 239b96a5e964012ace35926706ba0c4419b25cd7 Mon Sep 17 00:00:00 2001 From: alafage Date: Sun, 17 Nov 2024 22:16:53 +0100 Subject: [PATCH 22/30] :bug: Fix BCEWithLogitsLSLoss so target and pred types are the same BCEWithLogitsLossLS -> BCEWithLogitsLSLoss --- docs/source/api.rst | 2 +- tests/losses/test_classification.py | 12 ++++++------ torch_uncertainty/losses/__init__.py | 2 +- torch_uncertainty/losses/classification.py | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/source/api.rst b/docs/source/api.rst index b18bb813..46e6491e 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -326,7 +326,7 @@ Losses ConfidencePenaltyLoss KLDiv ELBOLoss - BCEWithLogitsLossLS + BCEWithLogitsLSLoss Post-Processing Methods ----------------------- diff --git a/tests/losses/test_classification.py b/tests/losses/test_classification.py index bd966a93..6b9dc3ad 100644 --- a/tests/losses/test_classification.py +++ b/tests/losses/test_classification.py @@ -2,7 +2,7 @@ import torch from torch_uncertainty.losses import ( - BCEWithLogitsLossLS, + BCEWithLogitsLSLoss, ConfidencePenaltyLoss, ConflictualLoss, DECLoss, @@ -134,13 +134,13 @@ def test_failures(self): FocalLoss(gamma=1, reduction="median") -class TestBCEWithLogitsLossLS: - """Testing the BCEWithLogitsLossLS class.""" +class TestBCEWithLogitsLSLoss: + """Testing the BCEWithLogitsLSLoss class.""" def test_main(self): - loss = BCEWithLogitsLossLS(reduction="sum", label_smoothing=0.1) + loss = BCEWithLogitsLSLoss(reduction="sum", label_smoothing=0.1) loss(torch.tensor([0.0]), torch.tensor([0])) - loss = BCEWithLogitsLossLS(label_smoothing=0.6) + loss = BCEWithLogitsLSLoss(label_smoothing=0.6) loss(torch.tensor([0.0]), torch.tensor([0])) - loss = BCEWithLogitsLossLS(reduction="none") + loss = BCEWithLogitsLSLoss(reduction="none") 
loss(torch.tensor([0.0]), torch.tensor([0])) diff --git a/torch_uncertainty/losses/__init__.py b/torch_uncertainty/losses/__init__.py index 7d8f10c9..e0bd6c50 100644 --- a/torch_uncertainty/losses/__init__.py +++ b/torch_uncertainty/losses/__init__.py @@ -1,7 +1,7 @@ # ruff: noqa: F401 from .bayesian import ELBOLoss, KLDiv from .classification import ( - BCEWithLogitsLossLS, + BCEWithLogitsLSLoss, ConfidencePenaltyLoss, ConflictualLoss, DECLoss, diff --git a/torch_uncertainty/losses/classification.py b/torch_uncertainty/losses/classification.py index c2dfef43..16e79c7b 100644 --- a/torch_uncertainty/losses/classification.py +++ b/torch_uncertainty/losses/classification.py @@ -338,7 +338,7 @@ def forward(self, x: Tensor, y: Tensor) -> Tensor: return loss -class BCEWithLogitsLossLS(nn.BCEWithLogitsLoss): +class BCEWithLogitsLSLoss(nn.BCEWithLogitsLoss): def __init__( self, weight: Tensor | None = None, @@ -368,7 +368,7 @@ def __init__( def forward(self, preds: Tensor, targets: Tensor) -> Tensor: if self.label_smoothing == 0.0: - return super().forward(preds, targets) + return super().forward(preds, targets.type_as(preds)) targets = targets.float() targets = ( targets * (1 - self.label_smoothing) + self.label_smoothing / 2 From 241fc5f728bf439a13d14bf81b2326933cc5cc2d Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 22:20:49 +0100 Subject: [PATCH 23/30] :hammer: Fix DOTA case & add all to API --- docs/source/api.rst | 33 +++++++++++++++++++ torch_uncertainty/datamodules/__init__.py | 2 +- .../datamodules/classification/__init__.py | 2 +- .../classification/uci/__init__.py | 2 +- .../classification/uci/dota2_games.py | 6 ++-- .../datasets/classification/__init__.py | 2 +- .../datasets/classification/uci/__init__.py | 2 +- .../classification/uci/dota2_games.py | 4 +-- 8 files changed, 43 insertions(+), 10 deletions(-) diff --git a/docs/source/api.rst b/docs/source/api.rst index b18bb813..fdc2353f 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ 
-380,6 +380,20 @@ Classification TinyImageNetDataModule ImageNetDataModule +UCI Tabular Classification +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + :nosignatures: + :template: class.rst + + BankMarketingDataModule + DOTA2GamesDataModule + HTRU2DataModule + OnlineShoppersDataModule + SpamBaseDataModule + Regression ^^^^^^^^^^ .. autosummary:: @@ -433,6 +447,25 @@ Classification TinyImageNetC OpenImageO + +UCI Tabular Classification +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. currentmodule:: torch_uncertainty.datasets.classification.uci + + +.. autosummary:: + :toctree: generated/ + :nosignatures: + :template: class.rst + + BankMarketing + DOTA2Games + HTRU2 + OnlineShoppers + SpamBase + + Regression ^^^^^^^^^^ diff --git a/torch_uncertainty/datamodules/__init__.py b/torch_uncertainty/datamodules/__init__.py index b4c9797d..02a28e9f 100644 --- a/torch_uncertainty/datamodules/__init__.py +++ b/torch_uncertainty/datamodules/__init__.py @@ -4,7 +4,7 @@ BankMarketingDataModule, CIFAR10DataModule, CIFAR100DataModule, - Dota2GamesDataModule, + DOTA2GamesDataModule, HTRU2DataModule, ImageNetDataModule, MNISTDataModule, diff --git a/torch_uncertainty/datamodules/classification/__init__.py b/torch_uncertainty/datamodules/classification/__init__.py index 55cbc498..20f19650 100644 --- a/torch_uncertainty/datamodules/classification/__init__.py +++ b/torch_uncertainty/datamodules/classification/__init__.py @@ -6,7 +6,7 @@ from .tiny_imagenet import TinyImageNetDataModule from .uci import ( BankMarketingDataModule, - Dota2GamesDataModule, + DOTA2GamesDataModule, HTRU2DataModule, OnlineShoppersDataModule, SpamBaseDataModule, diff --git a/torch_uncertainty/datamodules/classification/uci/__init__.py b/torch_uncertainty/datamodules/classification/uci/__init__.py index 24d2cb38..b4f53120 100644 --- a/torch_uncertainty/datamodules/classification/uci/__init__.py +++ b/torch_uncertainty/datamodules/classification/uci/__init__.py @@ -1,6 +1,6 @@ # ruff: noqa: F401 from 
.bank_marketing import BankMarketingDataModule -from .dota2_games import Dota2GamesDataModule +from .dota2_games import DOTA2GamesDataModule from .htru2 import HTRU2DataModule from .online_shoppers import OnlineShoppersDataModule from .spam_base import SpamBaseDataModule diff --git a/torch_uncertainty/datamodules/classification/uci/dota2_games.py b/torch_uncertainty/datamodules/classification/uci/dota2_games.py index 6153e855..8269a6c9 100644 --- a/torch_uncertainty/datamodules/classification/uci/dota2_games.py +++ b/torch_uncertainty/datamodules/classification/uci/dota2_games.py @@ -1,11 +1,11 @@ from pathlib import Path -from torch_uncertainty.datasets.classification import Dota2Games +from torch_uncertainty.datasets.classification import DOTA2Games from .uci_classification import UCIClassificationDataModule -class Dota2GamesDataModule(UCIClassificationDataModule): +class DOTA2GamesDataModule(UCIClassificationDataModule): def __init__( self, root: str | Path, @@ -37,7 +37,7 @@ def __init__( """ super().__init__( root=root, - dataset=Dota2Games, + dataset=DOTA2Games, batch_size=batch_size, val_split=val_split, test_split=test_split, diff --git a/torch_uncertainty/datasets/classification/__init__.py b/torch_uncertainty/datasets/classification/__init__.py index 25983cf4..a0b496a5 100644 --- a/torch_uncertainty/datasets/classification/__init__.py +++ b/torch_uncertainty/datasets/classification/__init__.py @@ -14,7 +14,7 @@ from .uci import ( HTRU2, BankMarketing, - Dota2Games, + DOTA2Games, OnlineShoppers, SpamBase, ) diff --git a/torch_uncertainty/datasets/classification/uci/__init__.py b/torch_uncertainty/datasets/classification/uci/__init__.py index 96f9894a..943b9282 100644 --- a/torch_uncertainty/datasets/classification/uci/__init__.py +++ b/torch_uncertainty/datasets/classification/uci/__init__.py @@ -1,6 +1,6 @@ # ruff: noqa: F401 from .bank_marketing import BankMarketing -from .dota2_games import Dota2Games +from .dota2_games import DOTA2Games from .htru2 
import HTRU2 from .online_shoppers import OnlineShoppers from .spam_base import SpamBase diff --git a/torch_uncertainty/datasets/classification/uci/dota2_games.py b/torch_uncertainty/datasets/classification/uci/dota2_games.py index 3daf57eb..1236995e 100644 --- a/torch_uncertainty/datasets/classification/uci/dota2_games.py +++ b/torch_uncertainty/datasets/classification/uci/dota2_games.py @@ -10,8 +10,8 @@ from .uci_classification import UCIClassificationDataset -class Dota2Games(UCIClassificationDataset): - """The bank Marketing UCI classification dataset. +class DOTA2Games(UCIClassificationDataset): + """The DOTA 2 Games UCI classification dataset. Args: root (str): Root directory of the datasets. From 2244fb35984b1e3f6d62fb0e8fb3e81fd3c82cb9 Mon Sep 17 00:00:00 2001 From: Olivier Date: Sun, 17 Nov 2024 22:46:34 +0100 Subject: [PATCH 24/30] :white_check_mark: Improve tests --- tests/datamodules/classification/test_uci.py | 10 ++++++++++ tests/losses/test_classification.py | 8 ++++++-- .../datasets/classification/uci/uci_classification.py | 7 +++++-- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/tests/datamodules/classification/test_uci.py b/tests/datamodules/classification/test_uci.py index 0d02f9a1..18dbc485 100644 --- a/tests/datamodules/classification/test_uci.py +++ b/tests/datamodules/classification/test_uci.py @@ -1,3 +1,5 @@ +import pytest + from torch_uncertainty.datamodules.classification import HTRU2DataModule @@ -16,3 +18,11 @@ def test_htru2(self): dm.setup("test") dm.test_dataloader() + + dm = HTRU2DataModule(root="./data/", batch_size=128, val_split=0.1) + + dm.prepare_data() + dm.setup() + + with pytest.raises(ValueError): + dm.setup("other") diff --git a/tests/losses/test_classification.py b/tests/losses/test_classification.py index 6b9dc3ad..d4c4fc28 100644 --- a/tests/losses/test_classification.py +++ b/tests/losses/test_classification.py @@ -138,9 +138,13 @@ class TestBCEWithLogitsLSLoss: """Testing the BCEWithLogitsLSLoss 
class.""" def test_main(self): - loss = BCEWithLogitsLSLoss(reduction="sum", label_smoothing=0.1) + loss = BCEWithLogitsLSLoss( + reduction="sum", label_smoothing=0.1, weight=torch.Tensor([1]) + ) + loss(torch.tensor([0.0]), torch.tensor([0])) + loss = BCEWithLogitsLSLoss(reduction="mean", label_smoothing=0.6) loss(torch.tensor([0.0]), torch.tensor([0])) - loss = BCEWithLogitsLSLoss(label_smoothing=0.6) + loss = BCEWithLogitsLSLoss(reduction="none", label_smoothing=0.1) loss(torch.tensor([0.0]), torch.tensor([0])) loss = BCEWithLogitsLSLoss(reduction="none") loss(torch.tensor([0.0]), torch.tensor([0])) diff --git a/torch_uncertainty/datasets/classification/uci/uci_classification.py b/torch_uncertainty/datasets/classification/uci/uci_classification.py index 0439c49c..59b557a9 100644 --- a/torch_uncertainty/datasets/classification/uci/uci_classification.py +++ b/torch_uncertainty/datasets/classification/uci/uci_classification.py @@ -70,12 +70,15 @@ def __init__( if self.need_split: self.gen = Generator().manual_seed(split_seed) - part = 1 - test_split if train else test_split self.split_idx = torch.ones(len(self)).multinomial( - num_samples=int(part * len(self)), + num_samples=int((1 - test_split) * len(self)), replacement=False, generator=self.gen, ) + if not self.train: + self.split_idx = torch.tensor( + [i for i in range(len(self)) if i not in self.split_idx] + ) self.data = self.data[self.split_idx] self.targets = self.targets[self.split_idx] if not binary: From 41bc492e0bd14af3f7bbf0b6817e2c8b4a170031 Mon Sep 17 00:00:00 2001 From: Olivier Date: Mon, 18 Nov 2024 09:43:18 +0100 Subject: [PATCH 25/30] :bug: Fix val split & loss args --- tests/datamodules/classification/test_uci.py | 13 ++++++++++++- .../classification/uci/uci_classification.py | 4 ++-- torch_uncertainty/losses/classification.py | 2 +- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/tests/datamodules/classification/test_uci.py b/tests/datamodules/classification/test_uci.py index 
18dbc485..b29e7851 100644 --- a/tests/datamodules/classification/test_uci.py +++ b/tests/datamodules/classification/test_uci.py @@ -1,6 +1,12 @@ import pytest -from torch_uncertainty.datamodules.classification import HTRU2DataModule +from torch_uncertainty.datamodules.classification import ( + BankMarketingDataModule, + DOTA2GamesDataModule, + HTRU2DataModule, + OnlineShoppersDataModule, + SpamBaseDataModule, +) class TestHTRU2DataModule: @@ -26,3 +32,8 @@ def test_htru2(self): with pytest.raises(ValueError): dm.setup("other") + + dm = BankMarketingDataModule(root="./data/", batch_size=128) + dm = DOTA2GamesDataModule(root="./data/", batch_size=128) + dm = OnlineShoppersDataModule(root="./data/", batch_size=128) + dm = SpamBaseDataModule(root="./data/", batch_size=128) diff --git a/torch_uncertainty/datasets/classification/uci/uci_classification.py b/torch_uncertainty/datasets/classification/uci/uci_classification.py index 59b557a9..c976ad9f 100644 --- a/torch_uncertainty/datasets/classification/uci/uci_classification.py +++ b/torch_uncertainty/datasets/classification/uci/uci_classification.py @@ -68,12 +68,12 @@ def __init__( self._standardize() if self.need_split: - self.gen = Generator().manual_seed(split_seed) + gen = Generator().manual_seed(split_seed) self.split_idx = torch.ones(len(self)).multinomial( num_samples=int((1 - test_split) * len(self)), replacement=False, - generator=self.gen, + generator=gen, ) if not self.train: self.split_idx = torch.tensor( diff --git a/torch_uncertainty/losses/classification.py b/torch_uncertainty/losses/classification.py index 16e79c7b..1ab56b8c 100644 --- a/torch_uncertainty/losses/classification.py +++ b/torch_uncertainty/losses/classification.py @@ -363,7 +363,7 @@ def __init__( label_smoothing (float, optional): The label smoothing factor. Defaults to 0.0. 
""" - super().__init__(weight, reduction) + super().__init__(weight=weight, reduction=reduction) self.label_smoothing = label_smoothing def forward(self, preds: Tensor, targets: Tensor) -> Tensor: From 779e426089c7747b9b1f7c5a5299352eb5d95c4a Mon Sep 17 00:00:00 2001 From: Olivier Date: Mon, 18 Nov 2024 09:53:21 +0100 Subject: [PATCH 26/30] :fire: Remove segformer dead code --- .../models/segmentation/segformer.py | 96 ++++++++----------- 1 file changed, 39 insertions(+), 57 deletions(-) diff --git a/torch_uncertainty/models/segmentation/segformer.py b/torch_uncertainty/models/segmentation/segformer.py index e13258e8..cf71fce8 100644 --- a/torch_uncertainty/models/segmentation/segformer.py +++ b/torch_uncertainty/models/segmentation/segformer.py @@ -42,17 +42,12 @@ def __init__( def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) - if m.bias is not None: - m.bias.data.zero_() + nn.init.constant_(m.bias, 0) def forward(self, x, h, w): x = self.fc1(x) @@ -66,13 +61,13 @@ def forward(self, x, h, w): class Attention(nn.Module): def __init__( self, - dim, - num_heads=8, - qkv_bias=False, - qk_scale=None, - attn_drop=0.0, - proj_drop=0.0, - sr_ratio=1, + dim: int, + num_heads: int = 8, + qkv_bias: bool = False, + qk_scale: float | None = None, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + sr_ratio: int = 1, ): super().__init__() assert ( @@ -100,8 +95,7 @@ def __init__( def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) + nn.init.constant_(m.bias, 0) elif 
isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @@ -109,10 +103,9 @@ def _init_weights(self, m): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) - if m.bias is not None: - m.bias.data.zero_() + nn.init.constant_(m.bias, 0) - def forward(self, x, h, w): + def forward(self, x: Tensor, h: int, w: int): b, n, c = x.shape q = ( self.q(x) @@ -149,17 +142,17 @@ def forward(self, x, h, w): class Block(nn.Module): def __init__( self, - dim, - num_heads, - mlp_ratio=4.0, - qkv_bias=False, - qk_scale=None, - dropout=0.0, - attn_drop=0.0, - drop_path=0.0, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = False, + qk_scale: None | float = None, + dropout: float = 0.0, + attn_drop: float = 0.0, + drop_path: float = 0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, - sr_ratio=1, + sr_ratio: int = 1, ): super().__init__() self.norm1 = norm_layer(dim) @@ -191,8 +184,7 @@ def __init__( def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) + nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @@ -200,8 +192,7 @@ def _init_weights(self, m): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) - if m.bias is not None: - m.bias.data.zero_() + nn.init.constant_(m.bias, 0) def forward(self, x, h, w): x = x + self.drop_path(self.attn(self.norm1(x), h, w)) @@ -237,19 +228,14 @@ def __init__( self.apply(self._init_weights) def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=0.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): + if isinstance(m, nn.LayerNorm): 
nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) - if m.bias is not None: - m.bias.data.zero_() + nn.init.constant_(m.bias, 0) def forward(self, x): x = self.proj(x) @@ -263,20 +249,20 @@ def forward(self, x): class MixVisionTransformer(nn.Module): def __init__( self, - img_size, - in_channels, - num_classes, - embed_dims, - num_heads, - mlp_ratios, - qkv_bias, - qk_scale, - drop_rate, - attn_drop_rate, - drop_path_rate, + img_size: int, + in_channels: int, + num_classes: int, + embed_dims: list[int], + num_heads: list[int], + mlp_ratios: list[int], + qkv_bias: bool, + qk_scale: float | None, + drop_rate: float, + attn_drop_rate: float, + drop_path_rate: float, norm_layer, depths, - sr_ratios, + sr_ratios: list[int], ): super().__init__() self.num_classes = num_classes @@ -401,8 +387,7 @@ def __init__( def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) + nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @@ -410,8 +395,7 @@ def _init_weights(self, m): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) - if m.bias is not None: - m.bias.data.zero_() + nn.init.constant_(m.bias, 0) def forward_features(self, x): b = x.shape[0] @@ -509,7 +493,7 @@ def forward(self, inputs: Tensor) -> Tensor: def resize( inputs: Tensor, - size: tuple[int] | torch.Size | None = None, + size: torch.Size | None = None, scale_factor=None, mode: str = "nearest", align_corners: bool | None = None, @@ -532,8 +516,6 @@ def resize( (input_h, input_w), (output_h, output_w), ) - if isinstance(size, torch.Size): - size = tuple(int(x) for x in 
size) return F.interpolate(inputs, size, scale_factor, mode, align_corners) From 7dbecb829cddf3f0ff81aef28433dd50857e4c29 Mon Sep 17 00:00:00 2001 From: Olivier Date: Mon, 18 Nov 2024 10:16:41 +0100 Subject: [PATCH 27/30] :white_check_mark: Continue improving cov. --- .../classification/test_cifar10.py | 10 ++++ .../classification/test_risk_coverage.py | 59 ++++++++++--------- tests/models/test_wideresnets.py | 3 + tests/transforms/test_corruption.py | 7 +++ .../datamodules/classification/cifar10.py | 4 +- torch_uncertainty/layers/bayesian/lpbnn.py | 5 +- 6 files changed, 54 insertions(+), 34 deletions(-) diff --git a/tests/datamodules/classification/test_cifar10.py b/tests/datamodules/classification/test_cifar10.py index 7f34ac7b..45c672c0 100644 --- a/tests/datamodules/classification/test_cifar10.py +++ b/tests/datamodules/classification/test_cifar10.py @@ -72,6 +72,16 @@ def test_cifar10_main(self): auto_augment="rand-m9-n2-mstd0.5", ) + with pytest.raises( + ValueError, match="CIFAR-H can only be used in testing." 
+ ): + dm = CIFAR10DataModule( + root="./data/", + batch_size=128, + test_alt="h", + ) + dm.setup("fit") + with pytest.raises(ValueError, match="Test set "): dm = CIFAR10DataModule( root="./data/", diff --git a/tests/metrics/classification/test_risk_coverage.py b/tests/metrics/classification/test_risk_coverage.py index 63e82f43..85981d5d 100644 --- a/tests/metrics/classification/test_risk_coverage.py +++ b/tests/metrics/classification/test_risk_coverage.py @@ -13,42 +13,45 @@ class TestAURC: """Testing the AURC metric class.""" def test_compute_binary(self) -> None: - probs = torch.as_tensor([0.1, 0.2, 0.3, 0.4, 0.2]) - targets = torch.as_tensor([1, 1, 1, 1, 1]) + probs = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.2]) + targets = torch.Tensor([1, 1, 1, 1, 1]) metric = AURC() assert metric(probs, targets).item() == pytest.approx(1) - targets = torch.as_tensor([0, 0, 0, 0, 0]) + targets = torch.Tensor([0, 0, 0, 0, 0]) metric = AURC() assert metric(probs, targets).item() == pytest.approx(0) - targets = torch.as_tensor([0, 0, 1, 1, 0]) + targets = torch.Tensor([0, 0, 1, 1, 0]) metric = AURC() value = (0 * 0.4 + 0.25 * 0.2 / 2 + 0.25 * 0.2 + 0.15 * 0.2 / 2) / 0.8 assert metric(probs, targets).item() == pytest.approx(value) + metric = AURC() + assert torch.isnan(metric(torch.Tensor([0.0]), torch.Tensor([1]))) + def test_compute_multiclass(self) -> None: - probs = torch.as_tensor( + probs = torch.Tensor( [[0.1, 0.9], [0.2, 0.8], [0.3, 0.7], [0.4, 0.6], [0.2, 0.8]] ) - targets = torch.as_tensor([1, 1, 1, 1, 1]).long() + targets = torch.Tensor([1, 1, 1, 1, 1]).long() metric = AURC() assert metric(probs, targets).item() == pytest.approx(0) - targets = torch.as_tensor([0, 0, 0, 0, 0]) + targets = torch.Tensor([0, 0, 0, 0, 0]) metric = AURC() assert metric(probs, targets).item() == pytest.approx(1) - targets = torch.as_tensor([1, 1, 0, 0, 1]) + targets = torch.Tensor([1, 1, 0, 0, 1]) metric = AURC() value = (0 * 0.4 + 0.25 * 0.2 / 2 + 0.25 * 0.2 + 0.15 * 0.2 / 2) / 0.8 assert 
metric(probs, targets).item() == pytest.approx(value) def test_compute_nan(self) -> None: - probs = torch.as_tensor([[0.1, 0.9]]) - targets = torch.as_tensor([1]).long() + probs = torch.Tensor([[0.1, 0.9]]) + targets = torch.Tensor([1]).long() metric = AURC() assert torch.isnan(metric(probs, targets)).all() def test_plot(self) -> None: - scores = torch.as_tensor([0.2, 0.1, 0.5, 0.3, 0.4]) - values = torch.as_tensor([0.1, 0.2, 0.3, 0.4, 0.5]) + scores = torch.Tensor([0.2, 0.1, 0.5, 0.3, 0.4]) + values = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5]) metric = AURC() metric.update(scores, values) fig, ax = metric.plot() @@ -75,28 +78,26 @@ class TestCovAtxRisk: """Testing the CovAtxRisk metric class.""" def test_compute_zero(self) -> None: - probs = torch.as_tensor( + probs = torch.Tensor( [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3], [0.6, 0.4], [0.8, 0.2]] ) - targets = torch.as_tensor([1, 1, 1, 1, 1]) + targets = torch.Tensor([1, 1, 1, 1, 1]) metric = CovAtxRisk(risk_threshold=0.5) # no cov for given risk assert torch.isnan(metric(probs, targets)) - probs = torch.as_tensor( - [0.1, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.48, 0.49] - ) - targets = torch.as_tensor([1, 0, 1, 1, 1, 0, 0, 0, 1]) + probs = torch.Tensor([0.1, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.48, 0.49]) + targets = torch.Tensor([1, 0, 1, 1, 1, 0, 0, 0, 1]) metric = CovAtxRisk(risk_threshold=0.55) # multiple cov for given risk assert metric(probs, targets) == pytest.approx(8 / 9) - probs = torch.as_tensor([0.1, 0.2, 0.3, 0.4, 0.2]) - targets = torch.as_tensor([0, 0, 1, 1, 1]) + probs = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.2]) + targets = torch.Tensor([0, 0, 1, 1, 1]) metric = CovAtxRisk(risk_threshold=0.5) assert metric(probs, targets) == pytest.approx(4 / 5) - targets = torch.as_tensor([0, 0, 1, 1, 0]) + targets = torch.Tensor([0, 0, 1, 1, 0]) metric = CovAtxRisk(risk_threshold=0.5) assert metric(probs, targets) == 1 @@ -118,24 +119,24 @@ class TestRiskAtxCov: """Testing the RiskAtxCov metric class.""" def 
test_compute_zero(self) -> None: - probs = torch.as_tensor( + probs = torch.Tensor( [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3], [0.6, 0.4], [0.8, 0.2]] ) - targets = torch.as_tensor([1, 1, 1, 1, 1]) + targets = torch.Tensor([1, 1, 1, 1, 1]) metric = RiskAtxCov(cov_threshold=0.5) assert metric(probs, targets) == 1 - probs = torch.as_tensor([0.1, 0.2, 0.3, 0.4, 0.2]) - targets = torch.as_tensor([0, 0, 1, 1, 1]) + probs = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.2]) + targets = torch.Tensor([0, 0, 1, 1, 1]) metric = RiskAtxCov(cov_threshold=0.5) assert metric(probs, targets) == pytest.approx(1 / 3) - probs = torch.as_tensor([0.1, 0.19, 0.3, 0.15, 0.4, 0.2]) - targets = torch.as_tensor([0, 0, 1, 0, 1, 1]) + probs = torch.Tensor([0.1, 0.19, 0.3, 0.15, 0.4, 0.2]) + targets = torch.Tensor([0, 0, 1, 0, 1, 1]) metric = RiskAtxCov(cov_threshold=0.5) assert metric(probs, targets) == 0 - probs = torch.as_tensor([0.1, 0.2, 0.3, 0.15, 0.4, 0.2]) - targets = torch.as_tensor([0, 0, 1, 0, 1, 1]) + probs = torch.Tensor([0.1, 0.2, 0.3, 0.15, 0.4, 0.2]) + targets = torch.Tensor([0, 0, 1, 0, 1, 1]) metric = RiskAtxCov(cov_threshold=0.55) assert metric(probs, targets) == 1 / 4 diff --git a/tests/models/test_wideresnets.py b/tests/models/test_wideresnets.py index 72c518fc..62045d63 100644 --- a/tests/models/test_wideresnets.py +++ b/tests/models/test_wideresnets.py @@ -21,6 +21,9 @@ class TestStdWide: + def test_main(self): + wideresnet28x10(in_channels=1, num_classes=10, style="imagenet") + def test_error(self): with pytest.raises(ValueError): wideresnet28x10(in_channels=1, num_classes=10, style="test") diff --git a/tests/transforms/test_corruption.py b/tests/transforms/test_corruption.py index 4e1a5e59..1ee28555 100644 --- a/tests/transforms/test_corruption.py +++ b/tests/transforms/test_corruption.py @@ -133,9 +133,16 @@ def test_fog(self): inputs = torch.rand(3, 32, 32) transform = Fog(1, size=32) transform(inputs) + + with pytest.raises(ValueError, match="Image must be square. 
Got "): + transform(torch.rand(3, 32, 12)) + transform = Fog(0, size=32) transform(inputs) + with pytest.raises(ValueError, match="Size must be a power of 2. Got "): + _ = Fog(1, size=15) + def test_brightness(self): inputs = torch.rand(3, 32, 32) transform = Brightness(1) diff --git a/torch_uncertainty/datamodules/classification/cifar10.py b/torch_uncertainty/datamodules/classification/cifar10.py index bda1a948..3ee9c8a3 100644 --- a/torch_uncertainty/datamodules/classification/cifar10.py +++ b/torch_uncertainty/datamodules/classification/cifar10.py @@ -159,8 +159,8 @@ def prepare_data(self) -> None: # coverage: ignore def setup(self, stage: Literal["fit", "test"] | None = None) -> None: if stage == "fit" or stage is None: - if self.test_alt in ("c", "h"): - raise ValueError("CIFAR-C and H can only be used in testing.") + if self.test_alt == "h": + raise ValueError("CIFAR-H can only be used in testing.") full = self.dataset( self.root, train=True, diff --git a/torch_uncertainty/layers/bayesian/lpbnn.py b/torch_uncertainty/layers/bayesian/lpbnn.py index b2585749..2b53305a 100644 --- a/torch_uncertainty/layers/bayesian/lpbnn.py +++ b/torch_uncertainty/layers/bayesian/lpbnn.py @@ -288,9 +288,8 @@ def reset_parameters(self): self.latent_logvar.reset_parameters() if self.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.conv.weight) - if fan_in != 0: - bound = 1 / math.sqrt(fan_in) - nn.init.uniform_(self.bias, -bound, bound) + bound = 1 / math.sqrt(fan_in) + nn.init.uniform_(self.bias, -bound, bound) def forward(self, x: Tensor) -> Tensor: # Draw a sample from the dist generated by the latent noise self.alpha From 8b80532c9fe3eba7e26d4f5ce99f3414f5457d3e Mon Sep 17 00:00:00 2001 From: Olivier Date: Mon, 18 Nov 2024 10:17:51 +0100 Subject: [PATCH 28/30] :zap: Update version for release --- docs/source/conf.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 
2b6426ea..aa6f332a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -15,7 +15,7 @@ f"{datetime.now().year!s}, Adrien Lafage and Olivier Laurent" ) author = "Adrien Lafage and Olivier Laurent" -release = "0.3.0" +release = "0.3.1" # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration diff --git a/pyproject.toml b/pyproject.toml index 605a37b3..8eda4b00 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "flit_core.buildapi" [project] name = "torch_uncertainty" -version = "0.3.0" +version = "0.3.1" authors = [ { name = "ENSTA U2IS", email = "olivier.laurent@ensta-paris.fr" }, { name = "Adrien Lafage", email = "adrienlafage@outlook.com" }, From 4022f8af096b93480a5e2ac86e21dbd80476e12b Mon Sep 17 00:00:00 2001 From: alafage Date: Mon, 18 Nov 2024 10:34:36 +0100 Subject: [PATCH 29/30] :books: Fix typo in Quickstart --- docs/source/quickstart.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst index 42d4dfb7..26ebcb89 100644 --- a/docs/source/quickstart.rst +++ b/docs/source/quickstart.rst @@ -89,7 +89,7 @@ CIFAR10 datamodule. from lightning.pytorch import TUTrainer dm = CIFAR10DataModule(root="data", batch_size=32) - trainer = TUTTrainer(gpus=1, max_epochs=100) + trainer = TUTrainer(gpus=1, max_epochs=100) trainer.fit(routine, dm) trainer.test(routine, dm) From cce5e1d457a2b0a23c7cb4ba162e54706c75ac07 Mon Sep 17 00:00:00 2001 From: Olivier Date: Mon, 18 Nov 2024 10:23:40 +0100 Subject: [PATCH 30/30] :zap: Update ruff and rm np from explicit dep. 
--- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8eda4b00..38708ec5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,6 @@ dependencies = [ "torchvision>=0.16", "einops", "matplotlib", - "numpy", "rich>=10.2.2", "seaborn", ] @@ -53,7 +52,7 @@ dev = [ "scikit-learn", "huggingface-hub", "torch_uncertainty[image]", - "ruff==0.6.9", + "ruff==0.7.4", "pytest-cov", "pre-commit", "pre-commit-hooks",