# test.py (98 lines, 77 loc, 3.7 KB) — DBGCN test/evaluation script.
# (GitHub page chrome and the copied line-number gutter were removed from this scrape.)
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from gcns import GCNS1,GCNS2,GCNS3,GCNS4
from dbgcn_utils import generate_dataset, load_data, get_normalized_adj
# ---- Hyper-parameters and command-line configuration ----
use_gpu = False
num_timesteps_input = 12   # length of the history window fed to the model
num_timesteps_output = 3   # prediction horizon
epochs = 5
batch_size = 50

parser = argparse.ArgumentParser(description='DBGCN')
parser.add_argument('--enable_cuda', action='store_true', help='Enable CUDA')
parser.add_argument('--data', type=str, default='data/node_values15.npy', help='data path')
parser.add_argument('--adjdata', type=str, default='data/adj_mat15_d.npy', help='adj data path')
args = parser.parse_args()

# Resolve the compute device: CUDA only when both requested and available.
args.device = torch.device(
    'cuda' if args.enable_cuda and torch.cuda.is_available() else 'cpu'
)
if __name__ == '__main__':
    torch.manual_seed(7)  # reproducible evaluation

    # Load the full dataset; the last 20% along the time axis is the test split.
    A, X = load_data(args.data, args.adjdata)
    split_line2 = int(X.shape[2] * 0.8)
    test_original_data = X[:, :, split_line2:]
    test_input, test_target = generate_dataset(test_original_data,
                                               num_timesteps_input=num_timesteps_input,
                                               num_timesteps_output=num_timesteps_output)
    print("*********** Test Data load successfully! *********")

    # Normalized adjacency used by every graph-convolution layer.
    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)
    A_wave = A_wave.to(device=args.device)

    ## The model performance under GCNS1 structure is the best
    # BUGFIX: the original constructed the net from `training_input`, fed
    # `train_epoch(training_input, ...)` and evaluated on `val_input`/`val_target`,
    # none of which exist in this file (copy-paste from the training script) —
    # it crashed with NameError. This script only loads the TEST split, so
    # evaluate on test_input/test_target.
    net = GCNS1(A_wave.shape[0],
                test_input.shape[3],
                num_timesteps_input,
                num_timesteps_output).to(device=args.device)

    # NOTE(review): no trained weights are loaded here — add
    # net.load_state_dict(torch.load(<checkpoint>)) before evaluation,
    # otherwise the metrics below describe a randomly initialized model.
    loss_criterion = nn.MSELoss()

    print("############### test ##############")
    # Single evaluation pass over the test split; no gradients needed.
    with torch.no_grad():
        net.eval()
        test_input = test_input.to(device=args.device)
        test_target = test_target.to(device=args.device)
        out = net(A_wave, test_input)
        # .item() replaces np.asscalar, which was removed in NumPy 1.23.
        test_loss = loss_criterion(out, test_target).item()

        out_unnormalized = out.detach().cpu().numpy()
        target_unnormalized = test_target.detach().cpu().numpy()
        mae = np.mean(np.absolute(out_unnormalized - target_unnormalized))
        # NOTE(review): MAPE divides by the raw targets; zeros in the target
        # produce inf/nan — confirm targets are strictly positive upstream.
        mape = np.mean(np.absolute(out_unnormalized - target_unnormalized) / target_unnormalized)
        rmse = np.sqrt(np.mean((out_unnormalized - target_unnormalized) ** 2))

    print("Test loss: {:.4f}".format(test_loss))
    print("Test MAE: {:.4f}--Test MAPE: {:.4f}--Test RMSE: {:.4f}".format(mae, mape, rmse))