diff --git a/configs/vision/pathology/offline/segmentation/bcss.yaml b/configs/vision/pathology/offline/segmentation/bcss.yaml
index 8265441d..9ea0f1a6 100644
--- a/configs/vision/pathology/offline/segmentation/bcss.yaml
+++ b/configs/vision/pathology/offline/segmentation/bcss.yaml
@@ -66,7 +66,7 @@ model:
     optimizer:
       class_path: torch.optim.AdamW
       init_args:
-        lr: ${oc.env:LR_VALUE, 0.0001}
+        lr: ${oc.env:LR_VALUE, 0.002}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.PolynomialLR
       init_args:
diff --git a/configs/vision/pathology/offline/segmentation/consep.yaml b/configs/vision/pathology/offline/segmentation/consep.yaml
index 68d30a59..6ceb085c 100644
--- a/configs/vision/pathology/offline/segmentation/consep.yaml
+++ b/configs/vision/pathology/offline/segmentation/consep.yaml
@@ -66,7 +66,7 @@ model:
     optimizer:
       class_path: torch.optim.AdamW
       init_args:
-        lr: ${oc.env:LR_VALUE, 0.0001}
+        lr: ${oc.env:LR_VALUE, 0.002}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.PolynomialLR
       init_args:
diff --git a/configs/vision/pathology/offline/segmentation/monusac.yaml b/configs/vision/pathology/offline/segmentation/monusac.yaml
index 26547aeb..b89d4eb6 100644
--- a/configs/vision/pathology/offline/segmentation/monusac.yaml
+++ b/configs/vision/pathology/offline/segmentation/monusac.yaml
@@ -68,7 +68,7 @@ model:
     optimizer:
       class_path: torch.optim.AdamW
       init_args:
-        lr: ${oc.env:LR_VALUE, 0.0001}
+        lr: ${oc.env:LR_VALUE, 0.002}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.PolynomialLR
       init_args:
diff --git a/configs/vision/pathology/online/segmentation/consep.yaml b/configs/vision/pathology/online/segmentation/consep.yaml
index e5b6de15..e17fcae2 100644
--- a/configs/vision/pathology/online/segmentation/consep.yaml
+++ b/configs/vision/pathology/online/segmentation/consep.yaml
@@ -59,7 +59,7 @@ model:
     optimizer:
       class_path: torch.optim.AdamW
       init_args:
-        lr: ${oc.env:LR_VALUE, 0.0001}
+        lr: ${oc.env:LR_VALUE, 0.002}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.PolynomialLR
       init_args:
diff --git a/configs/vision/pathology/online/segmentation/monusac.yaml b/configs/vision/pathology/online/segmentation/monusac.yaml
index 08b05644..6b0e9a50 100644
--- a/configs/vision/pathology/online/segmentation/monusac.yaml
+++ b/configs/vision/pathology/online/segmentation/monusac.yaml
@@ -60,7 +60,7 @@ model:
     optimizer:
       class_path: torch.optim.AdamW
       init_args:
-        lr: ${oc.env:LR_VALUE, 0.0001}
+        lr: ${oc.env:LR_VALUE, 0.002}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.PolynomialLR
       init_args:
diff --git a/docs/leaderboards.md b/docs/leaderboards.md
index 66e53f4e..c0570c15 100644
--- a/docs/leaderboards.md
+++ b/docs/leaderboards.md
@@ -40,7 +40,7 @@ We selected this approach to prioritize reliable, robust and fair FM-evaluation
 | **Output activation function** | none | none | none |
 | **Number of steps** | 12,500 | 12,500 (1) | 2,000 |
 | **Base batch size** | 256 | 32 | 64 |
-| **Base learning rate** | 0.0003 | 0.001 | 0.0001 |
+| **Base learning rate** | 0.0003 | 0.001 | 0.002 |
 | **Early stopping** | 5% * [Max epochs] | 10% * [Max epochs] (2) | 10% * [Max epochs] (2) |
 | **Optimizer** | SGD | AdamW | AdamW |
 | **Momentum** | 0.9 | n/a | n/a |
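Note on the learning-rate changes above: the new default of 0.002 applies only when the LR_VALUE environment variable is unset, because OmegaConf's `oc.env` resolver falls back to the value after the comma. This matches the updated base learning rate for the segmentation protocol in the leaderboards.md table. A minimal sketch of the resolution behavior, assuming the `omegaconf` package and an illustrative single-key config (not the real config path):

    import os
    from omegaconf import OmegaConf

    # Mirrors the interpolation used in the segmentation YAMLs above.
    cfg = OmegaConf.create({"lr": "${oc.env:LR_VALUE, 0.002}"})
    print(cfg.lr)  # "0.002" -- the fallback, since LR_VALUE is unset

    # An exported variable overrides the default. Note that oc.env resolves
    # to strings, so the consuming CLI must coerce the value to float.
    os.environ["LR_VALUE"] = "0.0005"
    print(cfg.lr)  # "0.0005"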
diff --git a/src/eva/core/models/__init__.py b/src/eva/core/models/__init__.py
index 16cfca96..a5f81a15 100644
--- a/src/eva/core/models/__init__.py
+++ b/src/eva/core/models/__init__.py
@@ -2,7 +2,13 @@
 
 from eva.core.models.modules import HeadModule, InferenceModule
 from eva.core.models.networks import MLP
-from eva.core.models.wrappers import BaseModel, HuggingFaceModel, ModelFromFunction, ONNXModel
+from eva.core.models.wrappers import (
+    BaseModel,
+    HuggingFaceModel,
+    ModelFromFunction,
+    ONNXModel,
+    TorchHubModel,
+)
 
 __all__ = [
     "HeadModule",
@@ -12,4 +18,5 @@
     "HuggingFaceModel",
     "ModelFromFunction",
     "ONNXModel",
+    "TorchHubModel",
 ]
diff --git a/src/eva/core/models/wrappers/from_torchhub.py b/src/eva/core/models/wrappers/from_torchhub.py
index cb424d01..2a80aaf5 100644
--- a/src/eva/core/models/wrappers/from_torchhub.py
+++ b/src/eva/core/models/wrappers/from_torchhub.py
@@ -1,6 +1,6 @@
 """Model wrapper for torch.hub models."""
 
-from typing import Any, Callable, Dict, Tuple
+from typing import Any, Callable, Dict, List, Tuple
 
 import torch
 import torch.nn as nn
@@ -72,7 +72,7 @@ def load_model(self) -> None:
         TorchHubModel.__name__ = self._model_name
 
     @override
-    def model_forward(self, tensor: torch.Tensor) -> torch.Tensor:
+    def model_forward(self, tensor: torch.Tensor) -> torch.Tensor | List[torch.Tensor]:
         if self._out_indices is not None:
             if not hasattr(self._model, "get_intermediate_layers"):
                 raise ValueError(
@@ -80,8 +80,14 @@ def model_forward(self, tensor: torch.Tensor) -> torch.Tensor:
                     "when using `out_indices`."
                 )
 
-            return self._model.get_intermediate_layers(
-                tensor, self._out_indices, reshape=True, return_class_token=False, norm=self._norm
+            return list(
+                self._model.get_intermediate_layers(
+                    tensor,
+                    self._out_indices,
+                    reshape=True,
+                    return_class_token=False,
+                    norm=self._norm,
+                )
             )
 
         return self._model(tensor)
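Note on the widened return type above: `get_intermediate_layers`, as implemented by DINO/DINOv2-style vision transformers, returns a tuple of feature maps, so wrapping it in `list(...)` makes the runtime value match the declared `List[torch.Tensor]`. An illustrative check of the multi-layer output outside the wrapper (uses a public DINOv2 checkpoint via torch.hub and downloads weights on first run; the layer indices are arbitrary):

    import torch

    # DINOv2 ViT-S/14 implements get_intermediate_layers with the same
    # keyword arguments the wrapper passes (reshape, return_class_token, norm).
    model = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14")
    model.eval()

    tensor = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        features = list(
            model.get_intermediate_layers(
                tensor, (8, 9, 10, 11), reshape=True, return_class_token=False, norm=True
            )
        )

    # With reshape=True each entry is a (B, C, H/patch, W/patch) feature map,
    # the layout segmentation decoders expect.
    print([f.shape for f in features])  # 4 x torch.Size([1, 384, 16, 16])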