# loss.py
import numpy as np
# import tensorflow as tf
import tensorflow.compat.v1 as tf
# from keras import backend as K
import tensorflow.keras.backend as K


def _nan2zero(x):
    # replace NaN entries with 0 so they drop out of sums
    return tf.where(tf.is_nan(x), tf.zeros_like(x), x)


def _nan2inf(x):
    # replace NaN entries with +inf so a broken loss is impossible to miss
    return tf.where(tf.is_nan(x), tf.zeros_like(x)+np.inf, x)


def _nelem(x):
    # number of non-NaN elements, clamped to 1 to avoid division by zero
    nelem = tf.reduce_sum(tf.cast(~tf.is_nan(x), tf.float32))
    return tf.cast(tf.where(tf.equal(nelem, 0.), 1., nelem), x.dtype)


def _reduce_mean(x):
    # mean over the non-NaN elements only (NaNs act as a mask)
    nelem = _nelem(x)
    x = _nan2zero(x)
    return tf.divide(tf.reduce_sum(x), nelem)
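

# --- Added example, not part of the original file ---
# A minimal sketch of what the NaN helpers buy you: entries marked NaN in a
# tensor are treated as missing and excluded from the mean instead of
# propagating NaN into the loss. Assumes TF1 graph mode (under TF2, call
# tf.disable_v2_behavior() before building the graph).
def _demo_masked_mean():
    x = tf.constant([1.0, 2.0, np.nan, 3.0])
    with tf.Session() as sess:
        print(sess.run(tf.reduce_mean(x)))  # nan
        print(sess.run(_reduce_mean(x)))    # 2.0, mean of the observed values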
def mse_loss(y_true, y_pred):
    ret = tf.square(y_pred - y_true)
    return _reduce_mean(ret)


def poisson_loss(y_true, y_pred):
    y_pred = tf.cast(y_pred, tf.float32)
    y_true = tf.cast(y_true, tf.float32)

    nelem = _nelem(y_true)
    y_true = _nan2zero(y_true)

    # full Poisson negative log-likelihood; the lgamma term does not depend
    # on y_pred, but keeping it makes zero a lower bound for the loss
    ret = y_pred - y_true*tf.log(y_pred+1e-10) + tf.lgamma(y_true+1.0)

    return tf.divide(tf.reduce_sum(ret), nelem)
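

# --- Added sanity check, not in the original file ---
# poisson_loss above is the full Poisson negative log-likelihood (including
# the lgamma normalizer), so it should agree with -scipy.stats.poisson.logpmf
# up to the 1e-10 stabilizer. scipy is an assumed extra dependency used only
# for this check; assumes TF1 graph mode.
def _check_poisson_loss():
    from scipy.stats import poisson
    y_true = np.array([0., 1., 4.], dtype=np.float32)
    y_pred = np.array([0.5, 1.5, 3.0], dtype=np.float32)
    reference = -poisson.logpmf(y_true, y_pred).mean()
    with tf.Session() as sess:
        print(sess.run(poisson_loss(y_true, y_pred)), reference)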
class NB(object):
    def __init__(self, theta=None, masking=False, scope='nbinom_loss/',
                 scale_factor=1.0, debug=False):

        # for numerical stability
        self.eps = 1e-10
        self.scale_factor = scale_factor
        self.debug = debug
        self.scope = scope
        self.masking = masking
        self.theta = theta

    def loss(self, y_true, y_pred, mean=True):
        scale_factor = self.scale_factor
        eps = self.eps

        with tf.name_scope(self.scope):
            y_true = tf.cast(y_true, tf.float32)
            y_pred = tf.cast(y_pred, tf.float32) * scale_factor

            if self.masking:
                nelem = _nelem(y_true)
                y_true = _nan2zero(y_true)

            # clip theta so huge dispersions cannot overflow the lgamma terms
            theta = tf.minimum(self.theta, 1e6)

            # negative log-likelihood of NB(mean=y_pred, dispersion=theta),
            # split into the normalizer (t1) and the data-dependent part (t2)
            t1 = tf.lgamma(theta+eps) + tf.lgamma(y_true+1.0) - tf.lgamma(y_true+theta+eps)
            t2 = (theta+y_true) * tf.log(1.0 + (y_pred/(theta+eps))) + (y_true * (tf.log(theta+eps) - tf.log(y_pred+eps)))

            if self.debug:
                assert_ops = [
                    tf.verify_tensor_all_finite(y_pred, 'y_pred has inf/nans'),
                    tf.verify_tensor_all_finite(t1, 't1 has inf/nans'),
                    tf.verify_tensor_all_finite(t2, 't2 has inf/nans')]

                tf.summary.histogram('t1', t1)
                tf.summary.histogram('t2', t2)

                with tf.control_dependencies(assert_ops):
                    final = t1 + t2
            else:
                final = t1 + t2

            final = _nan2inf(final)

            if mean:
                if self.masking:
                    final = tf.divide(tf.reduce_sum(final), nelem)
                else:
                    final = tf.reduce_mean(final)

        return final
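

# --- Added usage sketch, not from the original repo; names illustrative ---
# NB.loss needs a dispersion tensor `theta`. A common pattern is a trainable
# per-gene dispersion, parameterized in log space so it stays positive.
# Assumes TF1 graph mode (placeholders and Variables, no eager execution).
def _example_nb_loss(n_genes=3):
    log_theta = tf.Variable(tf.zeros([n_genes]), name='log_theta')
    nb = NB(theta=tf.exp(log_theta))
    y_true = tf.placeholder(tf.float32, shape=[None, n_genes])
    y_pred = tf.placeholder(tf.float32, shape=[None, n_genes])
    # scalar mean NB negative log-likelihood, differentiable w.r.t. both
    # y_pred and log_theta
    return nb.loss(y_true, y_pred)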
class ZINB(NB):
    def __init__(self, pi, ridge_lambda=0.0, scope='zinb_loss/', **kwargs):
        super().__init__(scope=scope, **kwargs)
        self.pi = pi
        self.ridge_lambda = ridge_lambda

    def loss(self, y_true, y_pred, mean=True):
        scale_factor = self.scale_factor
        eps = self.eps

        with tf.name_scope(self.scope):
            # reuse the existing NB neg. log-likelihood. mean is always False
            # here because everything is calculated element-wise; we take the
            # mean only at the end
            nb_case = super().loss(y_true, y_pred, mean=False) - tf.log(1.0-self.pi+eps)

            y_true = tf.cast(y_true, tf.float32)
            y_pred = tf.cast(y_pred, tf.float32) * scale_factor
            theta = tf.minimum(self.theta, 1e6)

            # probability of observing a zero under the NB alone, used in the
            # mixture term for zero counts
            zero_nb = tf.pow(theta/(theta+y_pred+eps), theta)
            zero_case = -tf.log(self.pi + ((1.0-self.pi)*zero_nb) + eps)
            result = tf.where(tf.less(y_true, 1e-8), zero_case, nb_case)

            # ridge penalty shrinking the dropout probabilities toward zero
            ridge = self.ridge_lambda*tf.square(self.pi)
            result += ridge

            if mean:
                if self.masking:
                    result = _reduce_mean(result)
                else:
                    result = tf.reduce_mean(result)

            result = _nan2inf(result)

            if self.debug:
                tf.summary.histogram('nb_case', nb_case)
                tf.summary.histogram('zero_nb', zero_nb)
                tf.summary.histogram('zero_case', zero_case)
                tf.summary.histogram('ridge', ridge)

        return result
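

# --- Added usage sketch, not from the original repo; names illustrative ---
# ZINB additionally needs `pi`, the per-entry dropout probability, typically
# the sigmoid output of a network head so it lies in (0, 1). The ridge term
# ridge_lambda * pi**2 discourages explaining everything as dropout.
# Assumes TF1 graph mode.
def _example_zinb_loss(n_genes=3):
    pi = tf.placeholder(tf.float32, shape=[None, n_genes])      # e.g. sigmoid head
    theta = tf.placeholder(tf.float32, shape=[n_genes])         # dispersion
    zinb = ZINB(pi, theta=theta, ridge_lambda=0.1)
    y_true = tf.placeholder(tf.float32, shape=[None, n_genes])
    y_pred = tf.placeholder(tf.float32, shape=[None, n_genes])  # NB mean
    return zinb.loss(y_true, y_pred)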
def dist_loss(data, min_dist, max_dist=20):
    pairwise_dist = cdisttf(data, data)
    dist = pairwise_dist - min_dist
    bigdist = max_dist - pairwise_dist
    # soft two-sided hinge: blows up when a pair is closer than min_dist or
    # farther than max_dist, stays near zero in between
    loss = tf.math.exp(-dist) + tf.math.exp(-bigdist)
    return loss


def cdisttf(data_1, data_2):
    # pairwise Euclidean distances; the 1e-10 keeps the sqrt differentiable
    # at zero distance
    prod = tf.math.reduce_sum(
        (tf.expand_dims(data_1, 1) - tf.expand_dims(data_2, 0)) ** 2, 2
    )
    return (prod + 1e-10) ** (1 / 2)
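

# --- Added usage note, not in the original file ---
# dist_loss returns a full [N, N] matrix of per-pair penalties, not a scalar;
# callers typically reduce it themselves. A minimal sketch, assuming a 2-D
# embedding and TF1 graph mode:
def _example_dist_loss():
    data = tf.placeholder(tf.float32, shape=[None, 2])  # embedding coordinates
    return tf.reduce_mean(dist_loss(data, min_dist=1.0, max_dist=20.0))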