# Nicola Dinsdale 2020
# Useful functions for training the model
# Args: class holding the hyperparameter values we might need for training
# EarlyStopping (and variants): stop training when the validation loss stops improving
# load_pretrained_model: merges a pretrained state dict into the model's state dict and returns it
########################################################################################################################
import torch
import numpy as np
class Args:
    # Store lots of the parameters that we might need to train the model
    def __init__(self):
        self.batch_size = 8
        self.log_interval = 10
        self.learning_rate = 1e-4
        self.epochs = 2
        self.train_val_prop = 0.9
        self.patience = 5
        self.channels_first = True
        self.diff_model_flag = False
        self.alpha = 1
        self.beta = 10
        self.epoch_stage_1 = 100
        self.epoch_reached = 1
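
# Illustrative usage (not part of the original file): the defaults above are
# plain attributes, so an experiment can override them after construction, e.g.
#     args = Args()
#     args.epochs = 300
#     args.learning_rate = 1e-3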
class EarlyStopping:
    # Early stops the training if the validation loss doesn't improve after a given patience
    def __init__(self, patience=5, verbose=False):
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.val_loss_min = np.inf

    def __call__(self, val_loss, model, epoch, optimizer, loss, PTH):
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model, epoch, optimizer, loss, PTH)
        elif score < self.best_score:
            self.counter += 1
            print('Early Stopping Counter: ', self.counter, '/', self.patience)
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model, epoch, optimizer, loss, PTH)
            self.counter = 0

    def save_checkpoint(self, val_loss, model, epoch, optimizer, loss, PTH):
        # Saves the model when the validation loss decreases
        if self.verbose:
            print('Validation loss decreased: ', self.val_loss_min, ' --> ', val_loss, 'Saving model ...')
        torch.save({'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss}, PTH)
        self.val_loss_min = val_loss  # track the new best loss so the message above stays accurate
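
# A minimal usage sketch for EarlyStopping (not from the original file):
# `train_epoch` and `validate` are assumed helpers supplied by the caller,
# returning the mean training and validation loss for one epoch.
def example_early_stopping_loop(model, optimizer, train_epoch, validate, args, pth):
    early_stopping = EarlyStopping(patience=args.patience, verbose=True)
    for epoch in range(1, args.epochs + 1):
        train_loss = train_epoch(model, optimizer)
        val_loss = validate(model)
        # The stopper checkpoints on improvement and counts stagnant epochs
        early_stopping(val_loss, model, epoch, optimizer, train_loss, pth)
        if early_stopping.early_stop:
            print('Patience exhausted: stopping at epoch', epoch)
            break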
class EarlyStopping_split_models:
    # Early stops the training if the validation loss doesn't improve after a given patience
    def __init__(self, patience=5, verbose=False):
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.val_loss_min = np.inf

    def __call__(self, val_loss, models, epoch, optimizer, loss, PTHS):
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, models, epoch, optimizer, loss, PTHS)
        elif score < self.best_score:
            self.counter += 1
            print('Early Stopping Counter: ', self.counter, '/', self.patience)
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, models, epoch, optimizer, loss, PTHS)
            self.counter = 0

    def save_checkpoint(self, val_loss, models, epoch, optimizer, loss, PTHS):
        # Saves the encoder and regressor separately when the validation loss decreases
        if self.verbose:
            print('Validation loss decreased: ', self.val_loss_min, ' --> ', val_loss, 'Saving model ...')
        [encoder, regressor] = models
        [PATH_ENCODER, PATH_REGRESSOR] = PTHS
        torch.save(encoder.state_dict(), PATH_ENCODER)
        torch.save(regressor.state_dict(), PATH_REGRESSOR)
        self.val_loss_min = val_loss  # track the new best loss
class EarlyStopping_unlearning:
    # Early stops the training if the validation loss doesn't improve after a given patience
    def __init__(self, patience=5, verbose=False):
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.val_loss_min = np.inf

    def __call__(self, val_loss, models, epoch, optimizer, loss, PTHS):
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, models, epoch, optimizer, loss, PTHS)
        elif score < self.best_score:
            self.counter += 1
            print('Early Stopping Counter: ', self.counter, '/', self.patience)
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, models, epoch, optimizer, loss, PTHS)
            self.counter = 0

    def save_checkpoint(self, val_loss, models, epoch, optimizer, loss, PTHS):
        # Saves the encoder, regressor and domain predictor separately when the
        # validation loss decreases; any path left as None is skipped
        if self.verbose:
            print('Validation loss decreased: ', self.val_loss_min, ' --> ', val_loss, 'Saving model ...')
        [encoder, regressor, domain_predictor] = models
        [PATH_ENCODER, PATH_REGRESSOR, PATH_DOMAIN] = PTHS
        if PATH_ENCODER:
            torch.save(encoder.state_dict(), PATH_ENCODER)
        if PATH_REGRESSOR:
            torch.save(regressor.state_dict(), PATH_REGRESSOR)
        if PATH_DOMAIN:
            torch.save(domain_predictor.state_dict(), PATH_DOMAIN)
        self.val_loss_min = val_loss  # track the new best loss
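
# A usage sketch for EarlyStopping_unlearning (illustrative only; the file
# names are placeholders): the three sub-networks travel as one list, and a
# None in the paths list skips saving that sub-network.
def example_unlearning_checkpoint(encoder, regressor, domain_predictor,
                                  optimizer, val_loss, epoch, loss):
    stopper = EarlyStopping_unlearning(patience=5, verbose=True)
    models = [encoder, regressor, domain_predictor]
    paths = ['encoder.pth', 'regressor.pth', None]  # skip the domain head
    stopper(val_loss, models, epoch, optimizer, loss, paths)
    return stopper.early_stop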
def load_pretrained_model(checkpoint, model):
    # Merge the pretrained weights into the model's state dict and return it;
    # the caller is expected to apply it with model.load_state_dict(...)
    model_dict = model.state_dict()
    pretrained_dict = checkpoint['model_state_dict']
    # Filter out keys that don't exist in the target model
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    print('Loading pretrained weights for: ', list(pretrained_dict.keys()))
    #pretrained_dict = {k: v for k, v in pretrained_dict.items() if 'regressor' not in k}
    model_dict.update(pretrained_dict)
    return model_dict
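
# A usage sketch for load_pretrained_model (illustrative; the checkpoint path
# is a placeholder): load the saved dict on CPU, merge the matching weights,
# and apply them to the model.
def example_restore(model, checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    model.load_state_dict(load_pretrained_model(checkpoint, model))
    return model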