-
Notifications
You must be signed in to change notification settings - Fork 10
/
ema.py
60 lines (49 loc) · 1.67 KB
/
ema.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import torch
import torch.distributed as dist
class EMA(object):
    """Exponential moving average (EMA) of a model's weights.

    Keeps a ``shadow`` copy of the model's state dict that is updated as
    ``shadow = decay * shadow + (1 - decay) * param`` on every call to
    ``update_params``. Buffers (e.g. BatchNorm running statistics) are not
    averaged; they are copied verbatim via ``update_buffer``.
    ``apply_shadow``/``restore`` swap the EMA weights in and out of the
    live model (typically around evaluation).
    """

    def __init__(self, model, alpha):
        # alpha: upper bound on the decay. Early steps use a smaller
        # decay via the (step + 1) / (step + 10) warm-up schedule so the
        # shadow tracks the model quickly at the start of training.
        self.step = 0
        self.model = model
        self.alpha = alpha
        # Detached deep copy of the full state dict (params + buffers).
        self.shadow = self.get_model_state()
        self.backup = {}
        self.param_keys = [k for k, _ in self.model.named_parameters()]
        self.buffer_keys = [k for k, _ in self.model.named_buffers()]

    def update_params(self):
        """Blend the model's current parameters into the shadow copy."""
        # Warm-up: decay ramps from 0.1 toward alpha as step grows.
        decay = min(self.alpha, (self.step + 1) / (self.step + 10))
        state = self.model.state_dict()
        for name in self.param_keys:
            self.shadow[name].copy_(
                decay * self.shadow[name]
                + (1 - decay) * state[name]
            )
        self.step += 1

    def update_buffer(self):
        """Copy the model's buffers into the shadow copy (no averaging)."""
        state = self.model.state_dict()
        for name in self.buffer_keys:
            self.shadow[name].copy_(state[name])

    def apply_shadow(self):
        """Load the EMA weights into the model, backing up the live ones."""
        self.backup = self.get_model_state()
        self.model.load_state_dict(self.shadow)

    def restore(self):
        """Undo ``apply_shadow`` by reloading the backed-up weights."""
        self.model.load_state_dict(self.backup)

    def get_model_state(self):
        """Return a detached, cloned copy of the model's state dict."""
        return {
            k: v.clone().detach()
            for k, v in self.model.state_dict().items()
        }
if __name__ == '__main__':
    # Smoke test: run one EMA update against a BatchNorm layer.
    print('=====')
    model = torch.nn.BatchNorm1d(5)
    # BUG FIX: the original called EMA(model, 0.9, 0.02, 0.002), but
    # EMA.__init__ accepts only (model, alpha) -> TypeError at runtime.
    ema = EMA(model, 0.9)
    inten = torch.randn(10, 5)
    out = model(inten)
    ema.update_params()
    print(model.state_dict())
    ema.update_buffer()
    print(model.state_dict())