From 333bc64a7d1eeeec17d03c13527e3f2499887829 Mon Sep 17 00:00:00 2001
From: Zezhi Shao <864453277@qq.com>
Date: Fri, 15 Dec 2023 11:05:48 +0800
Subject: [PATCH] =?UTF-8?q?style:=20=F0=9F=92=84=20pylint?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 basicts/losses/losses.py |  4 ++--
 basicts/utils/m4.py      | 25 +++++++++++++------------
 2 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/basicts/losses/losses.py b/basicts/losses/losses.py
index 5d0d3a11..ce8c7aeb 100644
--- a/basicts/losses/losses.py
+++ b/basicts/losses/losses.py
@@ -8,13 +8,13 @@
 def l1_loss(prediction: torch.Tensor, target: torch._tensor, size_average: Optional[bool] = None,
             reduce: Optional[bool] = None, reduction: str = "mean") -> torch.Tensor:
     """unmasked mae."""
-    return F.l1_loss(prediction, target)
+    return F.l1_loss(prediction, target, size_average=size_average, reduce=reduce, reduction=reduction)


 def l2_loss(prediction: torch.Tensor, target: torch.Tensor, size_average: Optional[bool] = None,
             reduce: Optional[bool] = None, reduction: str = "mean") -> torch.Tensor:
     """unmasked mse"""
-    return F.mse_loss(prediction, target)
+    return F.mse_loss(prediction, target, size_average=size_average, reduce=reduce, reduction=reduction)


 def masked_mae(prediction: torch.Tensor, target: torch.Tensor, null_val: float = np.nan) -> torch.Tensor:
diff --git a/basicts/utils/m4.py b/basicts/utils/m4.py
index 5b79823a..40072fd8 100644
--- a/basicts/utils/m4.py
+++ b/basicts/utils/m4.py
@@ -38,7 +38,7 @@ class M4Dataset:
     values: np.ndarray

     @staticmethod
-    def load(info_file_path: str = None, data: np.array = None) -> 'M4Dataset':
+    def load(info_file_path: str = None, data: np.array = None) -> "M4Dataset":
         """
         Load cached dataset.

@@ -165,20 +165,20 @@ def group_count(group_name):
             return len(np.where(self.test_set.groups == group_name)[0])

         weighted_score = {}
-        for g in ['Yearly', 'Quarterly', 'Monthly']:
+        for g in ["Yearly", "Quarterly", "Monthly"]:
             weighted_score[g] = scores[g] * group_count(g)
             scores_summary[g] = scores[g]

         others_score = 0
         others_count = 0
-        for g in ['Weekly', 'Daily', 'Hourly']:
+        for g in ["Weekly", "Daily", "Hourly"]:
             others_score += scores[g] * group_count(g)
             others_count += group_count(g)
-        weighted_score['Others'] = others_score
-        scores_summary['Others'] = others_score / others_count
+        weighted_score["Others"] = others_score
+        scores_summary["Others"] = others_score / others_count

         average = np.sum(list(weighted_score.values())) / len(self.test_set.groups)
-        scores_summary['Average'] = average
+        scores_summary["Average"] = average

         return scores_summary

@@ -187,7 +187,8 @@ def m4_summary(save_dir, project_dir):
     """Summary evaluation for M4 dataset.

     Args:
-        save_dir (str): Directory where prediction results are saved. All "{save_dir}/M4_{seasonal pattern}.npy" should exist. Seasonal patterns = ["Yearly", "Quarterly", "Monthly", "Weekly", "Daily", "Hourly"]
+        save_dir (str): Directory where prediction results are saved. All "{save_dir}/M4_{seasonal pattern}.npy" should exist.
+                        Seasonal patterns = ["Yearly", "Quarterly", "Monthly", "Weekly", "Daily", "Hourly"]
         project_dir (str): Project directory. The M4 raw data should be in "{project_dir}/datasets/raw_data/M4".
     """
     seasonal_patterns = ["Yearly", "Quarterly", "Monthly", "Weekly", "Daily", "Hourly"]  # the order cannot be changed
@@ -205,16 +206,16 @@ def build_cache(files: str) -> None:
                 values = row.values
                 timeseries_dict[m4id] = values[~np.isnan(values)]
         return np.array(list(timeseries_dict.values()), dtype=object)
-
+
     print("Building cache for M4 dataset...")
     # read prediction and ground truth
     prediction = []
     for seasonal_pattern in seasonal_patterns:
         prediction.extend(np.load(save_dir + "/M4_{0}.npy".format(seasonal_pattern)))
     prediction = np.array(prediction, dtype=object)
-    train_values = build_cache('*-train.csv')
-    test_values = build_cache('*-test.csv')
-    print("Summarizing M4 dataset...")
+    train_values = build_cache("*-train.csv")
+    test_values = build_cache("*-test.csv")
+    print("Summarizing M4 dataset...")
     summary = M4Summary(info_file_path, train_values, test_values, data_dir + "/submission-Naive2.csv")
-    results = pd.DataFrame(summary.evaluate(prediction), index=['SMAPE', 'OWA'])
+    results = pd.DataFrame(summary.evaluate(prediction), index=["SMAPE", "OWA"])
     return results