feat: rename epoch_size to epoch_count (#962)
### Summary of Changes

In the `fit` methods of `NeuralNetworkRegressor` and `NeuralNetworkClassifier`, rename the parameter `epoch_size` to `epoch_count`.

The previous name was misleading, since an epoch does not have a size.
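
For illustration, here is a minimal sketch of the updated call. It assumes `model` is an already constructed `NeuralNetworkRegressor` or `NeuralNetworkClassifier` and `train_data` is a matching dataset; both names are placeholders, not part of this change.

```python
# Old keyword, no longer accepted after this change:
# fitted_model = model.fit(train_data, epoch_size=10, batch_size=16)

# New keyword: the number of training epochs is passed as `epoch_count`.
fitted_model = model.fit(train_data, epoch_count=10, batch_size=16)
```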

---------

Co-authored-by: megalinter-bot <[email protected]>
lars-reimann and megalinter-bot authored Nov 26, 2024
1 parent afafd43 commit fa62e9b
Showing 8 changed files with 56 additions and 56 deletions.
@@ -197,11 +197,11 @@
"id": "3d8efa74951725cb"
},
{
-"cell_type": "code",
-"source": "cnn_fitted = cnn.fit(dataset, epoch_size=8, batch_size=16)",
"metadata": {
"collapsed": false
},
+"cell_type": "code",
+"source": "cnn_fitted = cnn.fit(dataset, epoch_count=8, batch_size=16)",
"id": "381627a94d500675",
"outputs": [],
"execution_count": null
2 changes: 1 addition & 1 deletion docs/tutorials/time_series_forecasting.ipynb
@@ -205,7 +205,7 @@
" forecast_horizon=1,\n",
" continuous=False,\n",
" extra_names= [\"date\"]\n",
-"), epoch_size=25)"
+"), epoch_count=25)"
],
"metadata": {
"collapsed": false,
36 changes: 18 additions & 18 deletions src/safeds/ml/nn/_model.py
@@ -166,7 +166,7 @@ def from_pretrained_model(huggingface_repo: str) -> NeuralNetworkRegressor: # p
def fit(
self,
train_data: IFT,
-epoch_size: int = 25,
+epoch_count: int = 25,
batch_size: int = 1,
learning_rate: float = 0.001,
callback_on_batch_completion: Callable[[int, float], None] | None = None,
@@ -181,7 +181,7 @@ def fit(
----------
train_data:
The data the network should be trained on.
-epoch_size:
+epoch_count:
The number of times the training cycle should be done.
batch_size:
The size of data batches that should be loaded at one time.
@@ -202,7 +202,7 @@ def fit(
Raises
------
OutOfBoundsError
-If epoch_size < 1
+If epoch_count < 1
If batch_size < 1
"""
import torch
@@ -218,7 +218,7 @@ def fit(
if not self._input_conversion._is_fit_data_valid(train_data):
raise FeatureDataMismatchError

-_check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+_check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
_check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))

copied_model = copy.deepcopy(self)
@@ -236,7 +236,7 @@ def fit(
loss_fn = nn.MSELoss()

optimizer = torch.optim.SGD(copied_model._model.parameters(), lr=learning_rate)
-for _ in range(epoch_size):
+for _ in range(epoch_count):
loss_sum = 0.0
amount_of_loss_values_calculated = 0
for x, y in iter(dataloader):
@@ -273,7 +273,7 @@ def fit(
# "median_absolute_deviation",
# "coefficient_of_determination",
# ],
-# epoch_size: int = 25,
+# epoch_count: int = 25,
# batch_size: int = 1,
# learning_rate: float = 0.001,
# ) -> Self:
@@ -288,7 +288,7 @@ def fit(
# The data the network should be trained on.
# optimization_metric:
# The metric that should be used for determining the performance of a model.
-# epoch_size:
+# epoch_count:
# The number of times the training cycle should be done.
# batch_size:
# The size of data batches that should be loaded at one time.
@@ -317,7 +317,7 @@ def fit(
# "Hyperparameter optimization is currently not supported for CNN Regression Tasks.",
# ) # pragma: no cover
#
-# _check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+# _check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
# _check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))
#
# list_of_models = self._get_models_for_all_choices()
@@ -334,7 +334,7 @@ def fit(
# executor.submit(
# model.fit,
# train_set, # type: ignore[arg-type]
-# epoch_size,
+# epoch_count,
# batch_size,
# learning_rate,
# ),
@@ -774,7 +774,7 @@ def from_pretrained_model(huggingface_repo: str) -> NeuralNetworkClassifier: #
def fit(
self,
train_data: IFT,
-epoch_size: int = 25,
+epoch_count: int = 25,
batch_size: int = 1,
learning_rate: float = 0.001,
callback_on_batch_completion: Callable[[int, float], None] | None = None,
@@ -789,7 +789,7 @@ def fit(
----------
train_data:
The data the network should be trained on.
-epoch_size:
+epoch_count:
The number of times the training cycle should be done.
batch_size:
The size of data batches that should be loaded at one time.
@@ -810,7 +810,7 @@ def fit(
Raises
------
ValueError
-If epoch_size < 1
+If epoch_count < 1
If batch_size < 1
"""
import torch
@@ -831,7 +831,7 @@ def fit(
if not self._input_conversion._is_fit_data_valid(train_data):
raise FeatureDataMismatchError

-_check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+_check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
_check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))

copied_model = copy.deepcopy(self)
@@ -856,7 +856,7 @@ def fit(
loss_fn = nn.BCELoss()

optimizer = torch.optim.SGD(copied_model._model.parameters(), lr=learning_rate)
-for _ in range(epoch_size):
+for _ in range(epoch_count):
loss_sum = 0.0
amount_of_loss_values_calculated = 0
for x, y in iter(dataloader):
@@ -890,7 +890,7 @@ def fit(
# train_data: IFT,
# optimization_metric: Literal["accuracy", "precision", "recall", "f1_score"],
# positive_class: Any = None,
-# epoch_size: int = 25,
+# epoch_count: int = 25,
# batch_size: int = 1,
# learning_rate: float = 0.001,
# ) -> Self:
@@ -907,7 +907,7 @@ def fit(
# The metric that should be used for determining the performance of a model.
# positive_class:
# The class to be considered positive. Only needs to be provided when choosing precision, recall or f1_score as the optimization metric.
-# epoch_size:
+# epoch_count:
# The number of times the training cycle should be done.
# batch_size:
# The size of data batches that should be loaded at one time.
@@ -936,7 +936,7 @@ def fit(
# "Continuous Predictions are currently not supported for Time Series Classification.",
# )
#
-# _check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+# _check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
# _check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))
#
# list_of_models = self._get_models_for_all_choices()
@@ -956,7 +956,7 @@ def fit(
# executor.submit(
# model.fit,
# train_set, # type: ignore[arg-type]
-# epoch_size,
+# epoch_count,
# batch_size,
# learning_rate,
# ),
12 changes: 6 additions & 6 deletions tests/safeds/ml/nn/test_cnn_workflow.py
@@ -3,6 +3,8 @@

import pytest
import torch
+from torch.types import Device
+
from safeds._config import _get_device
from safeds.data.image.containers import ImageList
from safeds.data.image.containers._single_size_image_list import _SingleSizeImageList
@@ -27,8 +29,6 @@
MaxPooling2DLayer,
)
from safeds.ml.nn.typing import VariableImageSize
-from torch.types import Device
-
from tests.helpers import configure_test_with_device, device_cpu, device_cuda, images_all, resolve_resource_path

if TYPE_CHECKING:
@@ -88,7 +88,7 @@ def test_should_train_and_predict_model(
InputConversionImageToTable(image_dataset.input_size),
layers,
)
-nn = nn_original.fit(image_dataset, epoch_size=2)
+nn = nn_original.fit(image_dataset, epoch_count=2)
assert nn_original._model is not nn._model
prediction: ImageDataset = nn.predict(image_dataset.get_input())
assert one_hot_encoder.inverse_transform(prediction.get_output()) == Table({"class": prediction_label})
@@ -147,7 +147,7 @@ def test_should_train_and_predict_model(
InputConversionImageToColumn(image_dataset.input_size),
layers,
)
-nn = nn_original.fit(image_dataset, epoch_size=2)
+nn = nn_original.fit(image_dataset, epoch_count=2)
assert nn_original._model is not nn._model
prediction: ImageDataset = nn.predict(image_dataset.get_input())
assert prediction.get_output() == Column("class", prediction_label)
@@ -188,7 +188,7 @@ def test_should_train_and_predict_model(
InputConversionImageToImage(image_dataset.input_size),
layers,
)
-nn = nn_original.fit(image_dataset, epoch_size=20)
+nn = nn_original.fit(image_dataset, epoch_count=20)
assert nn_original._model is not nn._model
prediction = nn.predict(image_dataset.get_input())
assert isinstance(prediction.get_output(), ImageList)
@@ -229,7 +229,7 @@ def test_should_train_and_predict_model_variable_image_size(
InputConversionImageToImage(VariableImageSize.from_image_size(image_dataset.input_size)),
layers,
)
-nn = nn_original.fit(image_dataset, epoch_size=20)
+nn = nn_original.fit(image_dataset, epoch_count=20)
assert nn_original._model is not nn._model
prediction = nn.predict(
image_dataset.get_input().resize(
6 changes: 3 additions & 3 deletions tests/safeds/ml/nn/test_dropout_workflow.py
@@ -1,4 +1,6 @@
import pytest
+from torch.types import Device
+
from safeds._config import _get_device
from safeds.data.tabular.containers import Table
from safeds.ml.nn import (
@@ -11,8 +13,6 @@
DropoutLayer,
ForwardLayer,
)
-from torch.types import Device
-
from tests.helpers import configure_test_with_device, get_devices, get_devices_ids


@@ -32,6 +32,6 @@ def test_forward_model(device: Device) -> None:
[ForwardLayer(neuron_count=1), DropoutLayer(probability=0.5)],
)

-fitted_model = model.fit(train_table.to_tabular_dataset("value"), epoch_size=1, learning_rate=0.01)
+fitted_model = model.fit(train_table.to_tabular_dataset("value"), epoch_count=1, learning_rate=0.01)
assert fitted_model._model is not None
assert fitted_model._model.state_dict()["_pytorch_layers.0._layer.weight"].device == _get_device()
6 changes: 3 additions & 3 deletions tests/safeds/ml/nn/test_forward_workflow.py
@@ -1,4 +1,6 @@
import pytest
+from torch.types import Device
+
from safeds._config import _get_device
from safeds.data.tabular.containers import Table
from safeds.data.tabular.transformation import StandardScaler
@@ -11,8 +13,6 @@
from safeds.ml.nn.layers import (
ForwardLayer,
)
-from torch.types import Device
-
from tests.helpers import configure_test_with_device, get_devices, get_devices_ids, resolve_resource_path


@@ -38,7 +38,7 @@ def test_forward_model(device: Device) -> None:
[ForwardLayer(neuron_count=1)],
)

-fitted_model = model.fit(train_table.to_tabular_dataset("target"), epoch_size=1, learning_rate=0.01)
+fitted_model = model.fit(train_table.to_tabular_dataset("target"), epoch_count=1, learning_rate=0.01)
fitted_model.predict(test_table.remove_columns_except(["value"]))
assert fitted_model._model is not None
assert fitted_model._model.state_dict()["_pytorch_layers.0._layer.weight"].device == _get_device()
8 changes: 4 additions & 4 deletions tests/safeds/ml/nn/test_lstm_workflow.py
@@ -1,4 +1,6 @@
import pytest
+from torch.types import Device
+
from safeds._config import _get_device
from safeds.data.tabular.containers import Table
from safeds.data.tabular.transformation import RangeScaler
@@ -13,8 +15,6 @@
GRULayer,
LSTMLayer,
)
-from torch.types import Device
-
from tests.helpers import configure_test_with_device, get_devices, get_devices_ids, resolve_resource_path


@@ -45,7 +45,7 @@ def test_lstm_model(device: Device) -> None:
continuous=True,
extra_names=["date"],
),
-epoch_size=1,
+epoch_count=1,
)

trained_model.predict(test_table)
@@ -57,7 +57,7 @@ def test_lstm_model(device: Device) -> None:
continuous=False,
extra_names=["date"],
),
-epoch_size=1,
+epoch_count=1,
)

trained_model_2.predict(test_table)