-
Notifications
You must be signed in to change notification settings - Fork 49
/
Copy pathWeightedBCECriterion.lua
47 lines (40 loc) · 2.08 KB
/
WeightedBCECriterion.lua
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
--[[
DeepTracking: Seeing Beyond Seeing Using Recurrent Neural Networks.
Copyright (C) 2016 Peter Ondruska, Mobile Robotics Group, University of Oxford
email: [email protected].
webpage: http://mrg.robots.ox.ac.uk/
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
--]]
-- Weighted binary cross-entropy criterion: like nn.BCECriterion, but each
-- element's loss contribution is scaled by a per-element weight. The target
-- passed to forward/backward is a table {target, weights}.
local WeightedBCECriterion, parent = torch.class('WeightedBCECriterion', 'nn.Criterion')
-- Small constant added inside log() to avoid log(0) = -inf for saturated predictions.
local eps = 1e-12
-- Forward pass: mean weighted binary cross-entropy.
--   loss = - sum( w * ( t*log(p + eps) + (1 - t)*log(1 - p + eps) ) ) / p:nElement()
-- input  : predictions p, expected in [0, 1].
-- target : table {t, w} -- element-wise labels and weights; presumably both
--          the same shape as input (TODO confirm at call sites).
-- Returns the scalar loss (also stored in self.output).
function WeightedBCECriterion:updateOutput(input, target)
-- Unpack the {labels, weights} pair; deliberately shadows the 'target' parameter.
local target, weights = target[1], target[2]
-- - log(input) * target - log(1 - input) * (1 - target)
-- Lazily allocate a reusable scratch tensor of the same type as the input.
self.buffer = self.buffer or torch.Tensor():typeAs(input)
-- buffer = w * log(p + eps); the eps shift guards against log(0).
self.buffer:add(input, eps):log():cmul(weights)
-- Positive-class term: - sum( t * w * log(p + eps) ).
self.output = - torch.dot(target, self.buffer)
-- Rebuild buffer in place as w * log(1 - p + eps): (-p), (+1), (+eps), log, (*w).
self.buffer:mul(input, -1):add(1):add(eps):log():cmul(weights)
-- Negative-class term - sum( (1 - t) * w * log(1 - p + eps) ), expanded as
-- -sum(buffer) + dot(t, buffer); finally normalize by the element count.
self.output = (self.output - torch.sum(self.buffer) + torch.dot(target, self.buffer)) / input:nElement()
return self.output
end
-- Backward pass: gradient of the weighted BCE loss w.r.t. the input.
-- target : table {t, w}, as in updateOutput.
-- Returns self.gradInput = - w * (t - p) / ( p*(1 - p + eps) + eps ) / t:nElement()
-- NOTE(review): relies on self.gradInput already existing (allocated by the
-- nn.Criterion base constructor); its type is not re-matched to input here --
-- confirm callers keep criterion and input on the same tensor type.
function WeightedBCECriterion:updateGradInput(input, target)
-- Unpack the {labels, weights} pair; deliberately shadows the 'target' parameter.
local target, weights = target[1], target[2]
-- - (target - input) / ( input (1 - input) )
-- The gradient is slightly incorrect:
-- It should have been divided by (input + eps) (1 - input + eps)
-- but it is divided by input (1 - input + eps) + eps
-- This modification requires less memory to be computed.
self.buffer = self.buffer or torch.Tensor():typeAs(input)
-- buffer = (p - 1 - eps) * p - eps  =  -( p*(1 - p + eps) + eps ),
-- i.e. the NEGATED denominator; the sign flip cancels the leading minus
-- of the analytic gradient when we divide by it below.
self.buffer:add(input, -1):add(-eps):cmul(input):add(-eps)
-- gradInput = (t - p) / buffer * w / n  =  - w*(t - p) / (p*(1-p+eps)+eps) / n
self.gradInput:add(target, -1, input):cdiv(self.buffer):cmul(weights):div(target:nElement())
return self.gradInput
end