Introduce long_1 and long_2
d.a.bunin committed Sep 8, 2022
1 parent b4a2929 commit 854ac9f
Showing 10 changed files with 66 additions and 29 deletions.
43 changes: 40 additions & 3 deletions .github/workflows/test.yml
@@ -65,13 +65,13 @@ jobs:
      - name: PyTest ("not long")
        run: |
-          poetry run pytest tests -v --cov=etna -m "not long" --cov-report=xml --durations=10
+          poetry run pytest tests -v --cov=etna -m "not long_1 and not long_2" --cov-report=xml --durations=10
          poetry run pytest etna -v --doctest-modules --durations=10
      - name: Upload coverage
        uses: codecov/codecov-action@v2

-  long-test:
+  long-1-test:
    runs-on: ubuntu-latest

    steps:
@@ -103,7 +103,44 @@ jobs:
      - name: PyTest ("long")
        run: |
-          poetry run pytest tests -v --cov=etna -m "long" --cov-report=xml --durations=10
+          poetry run pytest tests -v --cov=etna -m "long_1" --cov-report=xml --durations=10
      - name: Upload coverage
        uses: codecov/codecov-action@v2

+  long-2-test:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Set up Python
+        id: setup-python
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.8
+
+      - name: Install Poetry
+        uses: snok/install-poetry@v1
+        with:
+          virtualenvs-create: true
+          virtualenvs-in-project: true
+
+      - name: Load cached venv
+        id: cached-poetry-dependencies
+        uses: actions/cache@v2
+        with:
+          path: .venv
+          key: venv-${{ runner.os }}-3.8-${{ hashFiles('**/poetry.lock') }}
+
+      - name: Install dependencies
+        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
+        run: |
+          poetry install -E "all tests" -vv
+      - name: PyTest ("long")
+        run: |
+          poetry run pytest tests -v --cov=etna -m "long_2" --cov-report=xml --durations=10
+      - name: Upload coverage
+        uses: codecov/codecov-action@v2
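The workflow change above splits the single long-test job into long-1-test and long-2-test and switches test selection from the single "long" mark to the new "long_1" and "long_2" marks, while the fast job now deselects both. For -m "long_1" style selection to run without "unknown mark" warnings, the marks have to be registered with pytest; that registration is not visible in this diff, so the snippet below is only a hypothetical sketch of how it could look in a conftest.py, not the project's actual configuration.

    # conftest.py: hypothetical sketch, not taken from this commit.
    # Registers the two split marks so that `pytest -m "long_1"` and
    # `pytest -m "long_2"` select tests without "unknown mark" warnings.


    def pytest_configure(config):
        config.addinivalue_line("markers", "long_1: first group of long-running tests")
        config.addinivalue_line("markers", "long_2: second group of long-running tests")

Each CI job then runs one slice of the suite: -m "not long_1 and not long_2" for the fast tests, and -m "long_1" / -m "long_2" for the two long groups.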
4 changes: 2 additions & 2 deletions tests/test_ensembles/test_stacking_ensemble.py
@@ -207,7 +207,7 @@ def test_forecast_prediction_interval_interface(example_tsds, naive_ensemble: St
assert (segment_slice["target_0.975"] - segment_slice["target_0.025"] >= 0).all()


-@pytest.mark.long
+@pytest.mark.long_1
def test_multiprocessing_ensembles(
simple_df: TSDataset,
catboost_pipeline: Pipeline,
@@ -229,7 +229,7 @@ def test_multiprocessing_ensembles(
assert (single_jobs_forecast.df == multi_jobs_forecast.df).all().all()


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize("n_jobs", (1, 5))
def test_backtest(stacking_ensemble_pipeline: StackingEnsemble, example_tsds: TSDataset, n_jobs: int):
"""Check that backtest works with StackingEnsemble."""
4 changes: 2 additions & 2 deletions tests/test_ensembles/test_voting_ensemble.py
@@ -115,7 +115,7 @@ def test_forecast_prediction_interval_interface(example_tsds, naive_pipeline_1,
assert (segment_slice["target_0.975"] - segment_slice["target_0.025"] >= 0).all()


-@pytest.mark.long
+@pytest.mark.long_1
def test_multiprocessing_ensembles(
simple_df: TSDataset,
catboost_pipeline: Pipeline,
@@ -137,7 +137,7 @@ def test_multiprocessing_ensembles(
assert (single_jobs_forecast.df == multi_jobs_forecast.df).all().all()


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize("n_jobs", (1, 5))
def test_backtest(voting_ensemble_pipeline: VotingEnsemble, example_tsds: TSDataset, n_jobs: int):
"""Check that backtest works with VotingEnsemble."""
4 changes: 2 additions & 2 deletions tests/test_models/nn/test_deepar.py
@@ -30,7 +30,7 @@ def test_fit_wrong_order_transform(weekly_period_df):
model.fit(ts)


-@pytest.mark.long
+@pytest.mark.long_2
@pytest.mark.parametrize("horizon", [8, 21])
def test_deepar_model_run_weekly_overfit(weekly_period_df, horizon):
"""
@@ -69,7 +69,7 @@ def test_deepar_model_run_weekly_overfit(weekly_period_df, horizon):
assert mae(ts_test, ts_pred) < 0.2207


-@pytest.mark.long
+@pytest.mark.long_2
@pytest.mark.parametrize("horizon", [8])
def test_deepar_model_run_weekly_overfit_with_scaler(ts_dataset_weekly_function_with_horizon, horizon):
"""
2 changes: 1 addition & 1 deletion tests/test_models/nn/test_rnn.py
@@ -9,7 +9,7 @@
from etna.transforms import StandardScalerTransform


-@pytest.mark.long
+@pytest.mark.long_2
@pytest.mark.parametrize("horizon", [8, 13])
def test_rnn_model_run_weekly_overfit_with_scaler(ts_dataset_weekly_function_with_horizon, horizon):
"""
4 changes: 2 additions & 2 deletions tests/test_models/nn/test_tft.py
@@ -30,7 +30,7 @@ def test_fit_wrong_order_transform(weekly_period_df):
model.fit(ts)


-@pytest.mark.long
+@pytest.mark.long_2
@pytest.mark.parametrize("horizon", [8, 21])
def test_tft_model_run_weekly_overfit(ts_dataset_weekly_function_with_horizon, horizon):
"""
@@ -64,7 +64,7 @@ def test_tft_model_run_weekly_overfit(ts_dataset_weekly_function_with_horizon, h
assert mae(ts_test, ts_pred) < 0.24


-@pytest.mark.long
+@pytest.mark.long_2
@pytest.mark.parametrize("horizon", [8])
def test_tft_model_run_weekly_overfit_with_scaler(ts_dataset_weekly_function_with_horizon, horizon):
"""
22 changes: 11 additions & 11 deletions tests/test_models/test_inference.py
@@ -146,7 +146,7 @@ def _test_forecast_mixed_in_out_sample(ts, model, transforms):
assert_frame_equal(forecast_out_sample_df, forecast_full_df.iloc[-5:])


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize(
"model, transforms",
[
@@ -166,7 +166,7 @@ def test_forecast_in_sample_full(model, transforms, example_tsds):
_test_forecast_in_sample_full(example_tsds, model, transforms)


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.xfail(strict=True)
@pytest.mark.parametrize(
"model, transforms",
@@ -182,7 +182,7 @@ def test_forecast_in_sample_full_failed(model, transforms, example_tsds):
_test_forecast_in_sample_full(example_tsds, model, transforms)


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize(
"model, transforms",
[
@@ -221,7 +221,7 @@ def test_forecast_in_sample_full_not_implemented(model, transforms, example_tsds
_test_forecast_in_sample_full(example_tsds, model, transforms)


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize(
"model, transforms",
[
@@ -246,7 +246,7 @@ def test_forecast_in_sample_suffix(model, transforms, example_tsds):
_test_forecast_in_sample_suffix(example_tsds, model, transforms)


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize(
"model, transforms",
[
@@ -285,7 +285,7 @@ def test_forecast_in_sample_suffix_not_implemented(model, transforms, example_ts
_test_forecast_in_sample_suffix(example_tsds, model, transforms)


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize(
"model, transforms",
[
@@ -338,7 +338,7 @@ def test_forecast_out_sample_prefix(model, transforms, example_tsds):
_test_forecast_out_sample_prefix(example_tsds, model, transforms)


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize(
"model, transforms",
[
@@ -362,7 +362,7 @@ def test_forecast_out_sample_suffix(model, transforms, example_tsds):
_test_forecast_out_sample_suffix(example_tsds, model, transforms)


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize(
"model, transforms",
[
@@ -399,7 +399,7 @@ def test_forecast_out_sample_suffix_not_implemented(model, transforms, example_t
_test_forecast_out_sample_suffix(example_tsds, model, transforms)


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.xfail(strict=True)
@pytest.mark.parametrize(
"model, transforms",
@@ -413,7 +413,7 @@ def test_forecast_out_sample_suffix_failed(model, transforms, example_tsds):
_test_forecast_out_sample_suffix(example_tsds, model, transforms)


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize(
"model, transforms",
[
@@ -435,7 +435,7 @@ def test_forecast_mixed_in_out_sample(model, transforms, example_tsds):
_test_forecast_mixed_in_out_sample(example_tsds, model, transforms)


-@pytest.mark.long
+@pytest.mark.long_1
@pytest.mark.parametrize(
"model, transforms",
[
6 changes: 3 additions & 3 deletions tests/test_models/test_tbats.py
@@ -84,7 +84,7 @@ def test_not_fitted(model, linear_segments_ts_unique):
model.forecast(to_forecast)


-@pytest.mark.long
+@pytest.mark.long_2
@pytest.mark.parametrize("model", [TBATSModel(), BATSModel()])
def test_format(model, new_format_df):
df = new_format_df
@@ -97,7 +97,7 @@ def test_format(model, new_format_df):
assert not future_ts.isnull().values.any()


-@pytest.mark.long
+@pytest.mark.long_2
@pytest.mark.parametrize("model", [TBATSModel(), BATSModel()])
def test_dummy(model, sinusoid_ts):
train, test = sinusoid_ts
@@ -109,7 +109,7 @@ def test_dummy(model, sinusoid_ts):
assert value_metric < 0.33


-@pytest.mark.long
+@pytest.mark.long_2
@pytest.mark.parametrize("model", [TBATSModel(), BATSModel()])
def test_prediction_interval(model, example_tsds):
model.fit(example_tsds)
2 changes: 1 addition & 1 deletion tests/test_pipeline/test_autoregressive_pipeline.py
@@ -125,7 +125,7 @@ def test_forecast_raise_error_if_not_fitted():
_ = pipeline.forecast()


-@pytest.mark.long
+@pytest.mark.long_1
def test_backtest_with_n_jobs(big_example_tsdf: TSDataset):
"""Check that AutoRegressivePipeline.backtest gives the same results in case of single and multiple jobs modes."""
# create a pipeline
4 changes: 2 additions & 2 deletions tests/test_pipeline/test_pipeline.py
@@ -345,7 +345,7 @@ def test_get_fold_info_interface_hours(catboost_pipeline: Pipeline, example_tsdf
assert expected_columns == sorted(info_df.columns)


-@pytest.mark.long
+@pytest.mark.long_1
def test_backtest_with_n_jobs(catboost_pipeline: Pipeline, big_example_tsdf: TSDataset):
"""Check that Pipeline.backtest gives the same results in case of single and multiple jobs modes."""
ts1 = deepcopy(big_example_tsdf)
@@ -530,7 +530,7 @@ def test_sanity_backtest_naive_with_intervals(weekly_period_ts):
assert f"target_{quantiles[1]}" in features


-@pytest.mark.long
+@pytest.mark.long_1
def test_backtest_pass_with_filter_transform(ts_with_feature):
ts = ts_with_feature

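The test-file changes above only re-tag existing tests; their bodies are unchanged, and the mark alone decides which CI job picks them up. To reproduce a single job's selection locally, the same marker expressions can be passed to pytest. A minimal sketch follows; the expressions are copied from .github/workflows/test.yml, while the helper script itself (run_test_group.py and the pytest.main invocation) is purely illustrative and not part of this commit.

    # run_test_group.py: illustrative helper, not part of this commit.
    # Runs one of the three CI slices locally, e.g. `python run_test_group.py long_2`.
    import sys

    import pytest

    # Marker expressions used by the three jobs in .github/workflows/test.yml.
    EXPRESSIONS = {
        "short": "not long_1 and not long_2",
        "long_1": "long_1",
        "long_2": "long_2",
    }

    if __name__ == "__main__":
        group = sys.argv[1] if len(sys.argv) > 1 else "short"
        raise SystemExit(pytest.main(["tests", "-v", "-m", EXPRESSIONS[group]]))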
