"""PyTorch implementation of the Class-Balanced Loss.

Reference: "Class-Balanced Loss Based on Effective Number of Samples",
Yin Cui, Menglin Jia, Tsung-Yi Lin, Yang Song and Serge J. Belongie,
https://arxiv.org/abs/1901.05555, CVPR'19.

All credits to the original authors; modified by me.
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def get_device():
    """Return the CUDA device if available, otherwise the CPU."""
    return torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")


def focal_loss(labels, logits, alpha, gamma):
    """Compute the focal loss between `logits` and the ground truth `labels`.

    Focal loss = -alpha_t * (1 - pt)^gamma * log(pt),
    where pt is the probability of being classified to the true class:
    pt = p if the class is the true class, otherwise pt = 1 - p, with p = sigmoid(logit).

    Args:
        labels: A float tensor of size [batch, num_classes].
        logits: A float tensor of size [batch, num_classes].
        alpha: A float tensor of size [batch, num_classes] (or a scalar)
            specifying per-example weights for balanced cross entropy.
        gamma: A float scalar modulating the loss from hard and easy examples.

    Returns:
        focal_loss: A float32 scalar representing the normalized total loss.
    """
    bce = F.binary_cross_entropy_with_logits(input=logits, target=labels, reduction="none")

    if gamma == 0.0:
        modulator = 1.0
    else:
        # Numerically stable rewrite of the modulator (1 - pt)^gamma for
        # sigmoid activations:
        # (1 - pt)^gamma = exp(-gamma * label * logit - gamma * log(1 + exp(-logit))).
        modulator = torch.exp(-gamma * labels * logits
                              - gamma * torch.log(1 + torch.exp(-1.0 * logits)))

    weighted_loss = alpha * modulator * bce

    # Sum over all entries and normalize by the number of positive labels.
    focal_loss = torch.sum(weighted_loss) / torch.sum(labels)
    return focal_loss
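

def _focal_loss_demo():
    # Illustrative sketch only: this helper is an addition of mine, not part of
    # the original implementation. With gamma = 0 the modulator is 1 and the
    # result is plain sigmoid cross-entropy summed over all entries and
    # normalized by the number of positive labels; gamma > 0 down-weights the
    # well-classified (easy) examples relative to the hard ones.
    labels = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    logits = torch.tensor([[3.0, -3.0], [-0.2, 0.1]])  # first row easy, second row hard
    for g in (0.0, 2.0):
        loss = focal_loss(labels, logits, alpha=1.0, gamma=g)
        print(f"gamma={g}: focal_loss={loss.item():.4f}")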


class CB_loss(nn.Module):
    """Compute the Class-Balanced Loss between `logits` and the ground truth `labels`.

    Class-Balanced Loss: ((1 - beta) / (1 - beta^n)) * Loss(labels, logits),
    where Loss is one of the standard losses used for neural networks and
    n is the number of training samples in the ground-truth class.

    Args:
        samples_per_cls: A python list of size [no_of_classes] holding the
            number of training samples per class.
        no_of_classes: int. Total number of classes.
        loss_type: string. One of "sigmoid", "focal", "softmax".
        beta: float. Hyperparameter for the class-balanced loss.
        gamma: float. Hyperparameter for the focal loss.
        device: torch.device on which the class weights are created.

    Forward inputs:
        preds: A float tensor of logits of size [batch, no_of_classes].
        truth: An int tensor of size [batch] holding class indices.

    Returns:
        cb_loss: A float tensor representing the class-balanced loss.
    """

    def __init__(self, samples_per_cls, no_of_classes, loss_type, beta, gamma, device):
        super(CB_loss, self).__init__()
        self.samples_per_cls = samples_per_cls
        self.no_of_classes = no_of_classes
        self.loss_type = loss_type
        self.beta = beta
        self.gamma = gamma
        self.device = device

    def forward(self, preds, truth):
        # Effective number of samples per class: E_n = (1 - beta^n) / (1 - beta).
        # The per-class weight is 1 / E_n, renormalized to sum to no_of_classes.
        effective_num = 1.0 - np.power(self.beta, self.samples_per_cls)
        weights = (1.0 - self.beta) / np.array(effective_num)
        weights = weights / np.sum(weights) * self.no_of_classes

        labels_one_hot = F.one_hot(truth, self.no_of_classes).float()

        # Broadcast the per-class weights to shape [batch, no_of_classes], so
        # that every entry of a row carries the weight of that example's
        # ground-truth class.
        weights = torch.tensor(weights, device=self.device).float()
        weights = weights.unsqueeze(0)
        weights = weights.repeat(labels_one_hot.shape[0], 1) * labels_one_hot
        weights = weights.sum(1)
        weights = weights.unsqueeze(1)
        weights = weights.repeat(1, self.no_of_classes)

        if self.loss_type == "focal":
            cb_loss = focal_loss(labels_one_hot, preds, weights, self.gamma)
        elif self.loss_type == "sigmoid":
            cb_loss = F.binary_cross_entropy_with_logits(input=preds, target=labels_one_hot, weight=weights)
        elif self.loss_type == "softmax":
            pred = preds.softmax(dim=1)
            cb_loss = F.binary_cross_entropy(input=pred, target=labels_one_hot, weight=weights)
        else:
            raise ValueError(f"Unknown loss_type: {self.loss_type!r}")
        return cb_loss
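

def _effective_number_demo():
    # Illustrative sketch only: this helper is an addition of mine, not part of
    # the original implementation. It reproduces the weighting computed in
    # CB_loss.forward in isolation: the effective number per class is
    # E_n = (1 - beta^n) / (1 - beta), each class is weighted by 1 / E_n, and
    # the weights are renormalized to sum to the number of classes. Rare
    # classes end up with larger weights.
    samples_per_cls = [1200, 100, 200]  # same example counts as in the usage below
    beta = 0.999
    effective_num = 1.0 - np.power(beta, samples_per_cls)
    weights = (1.0 - beta) / effective_num
    weights = weights / np.sum(weights) * len(samples_per_cls)
    print("per-class weights:", np.round(weights, 4))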


# USAGE: replace your loss function with the following (substitute your own arguments):
loss_fn = CB_loss(samples_per_cls=[1200, 100, 200], no_of_classes=3,
                  loss_type="focal", beta=0.999, gamma=2.0, device=get_device())
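

# Minimal end-to-end sketch (assumed shapes: logits [batch, no_of_classes],
# integer labels [batch]); the demo helpers above are illustrative additions.
if __name__ == "__main__":
    device = get_device()
    torch.manual_seed(0)
    logits = torch.randn(8, 3, device=device)          # dummy model outputs
    labels = torch.randint(0, 3, (8,), device=device)  # dummy ground-truth classes
    print("class-balanced focal loss:", loss_fn(logits, labels).item())
    _focal_loss_demo()
    _effective_number_demo()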