# utils_GP.py
import os
import torch
import gpytorch
from utils import Hellinger_dist, Chernoff_dist
smoke_test = ('CI' in os.environ)
training_iter = 2 if smoke_test else 20
# Wrap training and prediction from the ExactGP tutorial into functions,
# so that we do not have to repeat the code later on.
def train(model, likelihood, training_iter=training_iter):
    model.train()
    likelihood.train()
    # Use the Adam optimizer; model.parameters() includes the GaussianLikelihood parameters
    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
    # "Loss" for GPs - the marginal log likelihood
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
    for i in range(training_iter):
        # Zero gradients from previous iteration
        optimizer.zero_grad()
        # Output from model
        output = model(model.train_x)
        # Calc loss and backprop gradients
        loss = -mll(output, model.train_y)
        loss.backward()
        optimizer.step()
        # print('Iter %d/%d - Loss: %.3f   lengthscale: %.3f   noise: %.3f' % (
        #     i + 1, training_iter, loss.item(),
        #     model.covar_module.lengthscale.item(),
        #     model.likelihood.noise.item()
        # ))
        # Stop early once the negative MLL drops below zero
        if loss < 0:
            break
def predict(model, likelihood, test_x=torch.linspace(0, 1, 51)):
    model.eval()
    likelihood.eval()
    # Make predictions by feeding the model output through the likelihood
    with torch.no_grad(), gpytorch.settings.fast_pred_var():
        # Test points are regularly spaced along [0, 1] by default
        return likelihood(model(test_x))
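'''
Usage sketch (an illustrative addition, not part of the original file): how
train() and predict() are meant to be called with the GP models defined further
down. The toy data and the choice of ExactGPModel here are assumptions.

    import math
    train_x = torch.linspace(0, 1, 20)
    train_y = torch.sin(train_x * 2 * math.pi) + 0.1 * torch.randn(20)
    likelihood = gpytorch.likelihoods.GaussianLikelihood()
    model = ExactGPModel(train_x, train_y, likelihood, l1=False)
    train(model, likelihood)
    pred = predict(model, likelihood)        # posterior over 51 points in [0, 1]
    lower, upper = pred.confidence_region()  # +/- 2 standard deviations
'''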
'''
Customized kernel: squared exponential over the Hellinger distance.
'''
class SEHellingerKernel(gpytorch.kernels.Kernel):
    # the kernel is stationary and has a learnable lengthscale
    is_stationary = True
    has_lengthscale = True

    # this is the kernel function
    def forward(self, x1, x2, **params):
        # calculate the pairwise Hellinger distance between inputs
        diff = torch.zeros((len(x1), len(x2)))
        for i, x in enumerate(x1):
            for j, y in enumerate(x2):
                diff[i, j] = Hellinger_dist(x, y)
        # avoid exact zeros for numerical stability (clamping is not in-place,
        # so the result has to be assigned back)
        diff = diff.clamp_min(1e-20)
        return torch.exp(-0.5 * diff / (self.lengthscale ** 2))
'''
Customized kernel: squared exponential over the Chernoff distance.
'''
class SEChernoffKernel(gpytorch.kernels.Kernel):
    # the kernel is stationary and has a learnable lengthscale
    is_stationary = True
    has_lengthscale = True

    # this is the kernel function
    def forward(self, x1, x2, **params):
        # calculate the pairwise Chernoff distance between inputs
        diff = torch.zeros((len(x1), len(x2)))
        for i, x in enumerate(x1):
            for j, y in enumerate(x2):
                diff[i, j] = Chernoff_dist(x, y)
        # avoid exact zeros for numerical stability (clamping is not in-place,
        # so the result has to be assigned back)
        diff = diff.clamp_min(1e-20)
        return torch.exp(-0.5 * diff / (self.lengthscale ** 2))
# Use the simplest form of GP model, exact inference
class CustomGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood, covar_module):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = covar_module
        self.train_x = train_x
        self.train_y = train_y
        self.is_sparse = False

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
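'''
Wiring sketch (an illustrative addition, not part of the original file):
CustomGPModel accepts a prebuilt covariance module, so the distance-based
kernels above plug in directly. What the rows of train_x represent is
determined by Hellinger_dist / Chernoff_dist in utils; train_x, train_y and
test_x below are hypothetical stand-ins of whatever format those expect.

    likelihood = gpytorch.likelihoods.GaussianLikelihood()
    covar = gpytorch.kernels.ScaleKernel(SEHellingerKernel())
    model = CustomGPModel(train_x, train_y, likelihood, covar)
    train(model, likelihood)
    preds = predict(model, likelihood, test_x=test_x)
'''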
'''
Customized kernel: squared exponential over the L1 distance of the sufficient statistics.
'''
class L1RBFKernel(gpytorch.kernels.Kernel):
    # the kernel is stationary and has a learnable lengthscale
    is_stationary = True
    has_lengthscale = True

    # this is the kernel function
    def forward(self, x1, x2, **params):
        # calculate the pairwise L1 distance between inputs
        diff = torch.zeros((len(x1), len(x2)))
        for i, x in enumerate(x1):
            for j, y in enumerate(x2):
                diff[i, j] = torch.sum(torch.abs(x - y))
        # avoid exact zeros for numerical stability (clamping is not in-place,
        # so the result has to be assigned back)
        diff = diff.clamp_min(1e-20)
        return torch.exp(-0.5 * diff / (self.lengthscale ** 2))
# We will use the simplest form of GP model, exact inference
class ExactGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood, l1=True):
        super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        if l1:
            self.covar_module = gpytorch.kernels.ScaleKernel(L1RBFKernel())
        else:
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        self.train_x = train_x
        self.train_y = train_y

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
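
# Minimal smoke test (an illustrative addition, not part of the original file):
# fits the L1-RBF variant on random 2-D inputs so that L1RBFKernel's row-wise
# loop has several features to sum over. Sizes, seed and the synthetic target
# are arbitrary assumptions.
if __name__ == '__main__':
    torch.manual_seed(0)
    train_x = torch.rand(15, 2)                    # 15 points, 2 features each
    train_y = torch.sin(3.0 * train_x.sum(dim=1))  # smooth synthetic target
    likelihood = gpytorch.likelihoods.GaussianLikelihood()
    model = ExactGPModel(train_x, train_y, likelihood, l1=True)
    train(model, likelihood, training_iter=training_iter)
    preds = predict(model, likelihood, test_x=torch.rand(5, 2))
    print('predictive mean:', preds.mean)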