From 3a0a90a67f9a6757e47fabb3041f8ca7cc4373a7 Mon Sep 17 00:00:00 2001
From: Sebastian
Date: Thu, 24 Nov 2022 08:47:55 +0100
Subject: [PATCH] Fixing metrics calculation in ClassificationSolver

Fix a typo in micro_f1_score ("macro" was passed instead of "micro") and use
"macro" averaging for precision, recall and F1 score in the binary
classification case, as suggested here:
https://stats.stackexchange.com/questions/99694/what-does-it-imply-if-accuracy-and-recall-are-the-same
---
 biotrainer/solvers/ClassificationSolver.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/biotrainer/solvers/ClassificationSolver.py b/biotrainer/solvers/ClassificationSolver.py
index 2e9d53ef..a85440c8 100644
--- a/biotrainer/solvers/ClassificationSolver.py
+++ b/biotrainer/solvers/ClassificationSolver.py
@@ -23,7 +23,7 @@ def __init__(self, *args, **kwargs):
         self.recall_per_class = Recall(average="none", num_classes=self.num_classes)
 
         self.macro_f1_score = F1Score(average="macro", num_classes=self.num_classes)
-        self.micro_f1_score = F1Score(average="macro", num_classes=self.num_classes)
+        self.micro_f1_score = F1Score(average="micro", num_classes=self.num_classes)
         self.f1_per_class = F1Score(average="none", num_classes=self.num_classes)
 
         self.scc = SpearmanCorrCoef()
@@ -56,9 +56,9 @@ def _compute_metrics(
             metrics_dict.update(f1scores)
         # Binary prediction
         else:
-            metrics_dict['precision'] = self.micro_precision(predicted.cpu(), labels.cpu()).item()
-            metrics_dict['recall'] = self.micro_recall(predicted.cpu(), labels.cpu()).item()
-            metrics_dict['f1_score'] = self.micro_f1_score(predicted.cpu(), labels.cpu()).item()
+            metrics_dict['precision'] = self.macro_precision(predicted.cpu(), labels.cpu()).item()
+            metrics_dict['recall'] = self.macro_recall(predicted.cpu(), labels.cpu()).item()
+            metrics_dict['f1_score'] = self.macro_f1_score(predicted.cpu(), labels.cpu()).item()
 
             metrics_dict['spearmans-corr-coeff'] = self.scc(predicted.cpu().float(), labels.cpu().float()).item()
             metrics_dict['matthews-corr-coeff'] = self.mcc(predicted.cpu(), labels.cpu()).item()
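
Note (editorial, not part of the patch): a minimal sketch of why the binary branch was
switched from micro to macro averaging. For single-label classification, micro-averaged
precision, recall and F1 all reduce to plain accuracy, so reporting all three micro values
adds no information; macro averaging scores each class separately and does react to class
imbalance. The sketch uses scikit-learn for brevity rather than the torchmetrics objects
from ClassificationSolver, and the toy labels are made up for illustration.

    from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

    # Imbalanced toy binary problem: 8 negatives, 2 positives,
    # with one false positive and one false negative.
    y_true = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
    y_pred = [0, 0, 0, 0, 0, 0, 0, 1, 1, 0]

    print(accuracy_score(y_true, y_pred))                    # 0.8
    # Micro averaging counts TP/FP/FN globally over both classes,
    # so precision == recall == F1 == accuracy for single-label data.
    print(precision_score(y_true, y_pred, average="micro"))  # 0.8
    print(recall_score(y_true, y_pred, average="micro"))     # 0.8
    print(f1_score(y_true, y_pred, average="micro"))         # 0.8
    # Macro averaging computes the metric per class and averages,
    # so the weaker minority class pulls the values below accuracy.
    print(precision_score(y_true, y_pred, average="macro"))  # 0.6875
    print(recall_score(y_true, y_pred, average="macro"))     # 0.6875
    print(f1_score(y_true, y_pred, average="macro"))         # 0.6875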