norms.py
import torch


def sobolev_norm(input, s=1, c=5):
    """Scale the 2-D spectrum of `input` by the Sobolev weight (1 + c*|xi|^2)^(s/2).

    Note: uses the pre-1.8 torch.fft / torch.ifft functions, which were removed
    in PyTorch 1.8. Expects `input` of shape (batch, channel, H, W).
    """
    signal_ndim = 2
    # Fourier transform of input -- stacked as [real, imaginary]
    real_input = input
    imaginary_input = torch.zeros_like(input)  # same device/dtype as input
    fourier_transform = torch.fft(torch.stack((real_input, imaginary_input), -1),
                                  signal_ndim=signal_ndim)
    # computing the frequency grid \xi; H = shape[2], W = shape[3]
    N, M = fourier_transform.shape[2], fourier_transform.shape[3]
    ns = torch.arange(0, N, dtype=torch.float32, device=input.device) / N
    ms = torch.arange(0, M, dtype=torch.float32, device=input.device) / M
    xi_y, xi_x = torch.meshgrid([ns, ms])  # both of shape (N, M)
    squared_xi = xi_x[None, None, :, :] ** 2 + \
                 xi_y[None, None, :, :] ** 2
    scaled_xi = (1 + c * squared_xi) ** (s * 0.5)
    # the derivative in the Sobolev norm becomes multiplication by \xi in Fourier space
    derivative = torch.stack([scaled_xi, scaled_xi], -1) * fourier_transform
    # final inverse Fourier transform
    output = torch.ifft(derivative, signal_ndim=signal_ndim)
    # we only need the real part as the answer
    output = output[..., 0]
    return output
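

# A minimal sketch of the same Fourier-multiplier computation on the modern
# torch.fft module (PyTorch >= 1.10 assumed for fft2/ifft2 and the meshgrid
# indexing argument). The name sobolev_norm_fft2 is our addition for
# illustration, not part of the original file.
def sobolev_norm_fft2(input, s=1, c=5):
    # complex 2-D FFT over the last two (spatial) dimensions
    fourier_transform = torch.fft.fft2(input)
    N, M = input.shape[-2], input.shape[-1]
    ns = torch.arange(N, dtype=torch.float32, device=input.device) / N
    ms = torch.arange(M, dtype=torch.float32, device=input.device) / M
    xi_y, xi_x = torch.meshgrid(ns, ms, indexing="ij")  # shape (N, M)
    scaled_xi = (1 + c * (xi_x ** 2 + xi_y ** 2)) ** (s * 0.5)
    # multiply each spectrum by the Sobolev weight and invert the transform
    output = torch.fft.ifft2(scaled_xi * fourier_transform)
    return output.real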


def lp_norm(input, p=None):
    # flatten each sample to a vector; p=None gives torch.norm's default
    # (Frobenius, i.e. L2) norm. .float() keeps the tensor on its device,
    # unlike .type(torch.FloatTensor), which would silently move it to CPU.
    input = input.view(input.size(0), -1).float()
    # for a numerically stable norm we rescale first:
    # \|x\| = alpha * \|x / alpha\|
    # epsilon keeps alpha away from zero
    epsilon = 1e-5
    alpha, _ = torch.max(torch.abs(input) + epsilon, dim=1)
    output = alpha * torch.norm(input / alpha[:, None], p=p, dim=1)
    return output
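

# Usage sketch (our illustration, not part of the original file). The Sobolev
# routine returns the filtered field with the same shape as its input; a scalar
# value per sample then comes from lp_norm. Input shape (batch, channel, H, W)
# is assumed.
if __name__ == "__main__":
    x = torch.randn(4, 1, 32, 64)
    filtered = sobolev_norm_fft2(x, s=1, c=5)  # use sobolev_norm on PyTorch < 1.8
    per_sample = lp_norm(filtered, p=2)        # shape: (4,)
    print(per_sample)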