-
Notifications
You must be signed in to change notification settings - Fork 9
/
FedServer.py
95 lines (86 loc) · 4.29 KB
/
FedServer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
from modelUtil import *
from collections import OrderedDict
import opacus
from opacus.validators import ModuleValidator
class CDPServer:
def __init__(self, device, model, input_shape, n_classes, noise_multiplier=1, sample_clients=10, disc_lr=1):
if 'linear_model' in model:
self.model = globals()[model](num_classes=n_classes, input_shape=input_shape)
else:
self.model = globals()[model](num_classes=n_classes)
self.disc_lr = disc_lr
self.device = device
self.sample_clients = sample_clients
self.noise_multiplier = noise_multiplier
self.trainable_names = [k for k, _ in self.model.named_parameters()]
self.agg = True
if "IN" in model:
self.agg = False
def get_median_norm(self, weights):
median_norm = OrderedDict()
for k, v in self.model.named_parameters():
norms = []
for i in range(len(weights)):
grad = v.detach()-weights[i][k]
norms.append(grad.norm(2))
median_norm[k] = min(median(norms), 10)
# print(median_norm)
return median_norm
def get_model_state_dict(self):
return self.model.state_dict()
def agg_updates(self, weights):
with torch.no_grad():
norms = self.get_median_norm(weights)
if self.agg == False:
for k, v in self.get_model_state_dict().items():
if 'bn' not in k and 'norm' not in k and 'downsample.1' not in k:
sumed_grad = torch.zeros_like(v)
for i in range(len(weights)):
grad = weights[i][k]-v
grad = grad*min(1, norms[k]/grad.norm(2))
sumed_grad += grad
sigma = norms[k]*self.noise_multiplier
sumed_grad += torch.normal(0, sigma, v.shape)
value = v + sumed_grad/self.sample_clients
self.model.state_dict()[k].data.copy_(value.detach().clone())
else:
for k, v in self.get_model_state_dict().items():
if 'bn' not in k:
sumed_grad = torch.zeros_like(v)
for i in range(len(weights)):
grad = weights[i][k]-v
grad = grad*min(1, norms[k]/grad.norm(2))
sumed_grad += grad
sigma = norms[k]*self.noise_multiplier
sumed_grad += torch.normal(0, sigma, v.shape)
value = v + sumed_grad/self.sample_clients
self.model.state_dict()[k].data.copy_(value.detach().clone())
class LDPServer(CDPServer):
    """Federated server for local differential privacy (LDP).

    Clients add DP noise locally, so the server performs a plain (unclipped,
    un-noised) average of client updates. The global model is wrapped with
    Opacus so its architecture matches what DP-SGD clients train.
    """

    def __init__(self, device, model, n_classes, input_shape, noise_multiplier=1, sample_clients=10, disc_lr=1):
        # Forward by keyword: this signature takes (n_classes, input_shape)
        # in the opposite order to CDPServer.__init__, so the original
        # positional call silently swapped the two arguments.
        super().__init__(device, model,
                         input_shape=input_shape, n_classes=n_classes,
                         noise_multiplier=noise_multiplier,
                         sample_clients=sample_clients, disc_lr=disc_lr)
        self.model = ModuleValidator.fix(self.model)
        self.privacy_engine = opacus.PrivacyEngine()
        # NOTE(review): _prepare_model is a private Opacus API and may change
        # between versions; prefer PrivacyEngine.make_private(...) if the
        # optimizer/data loader are available here.
        self.model = self.privacy_engine._prepare_model(self.model)
        # self.agg is already set (identically) by CDPServer.__init__.

    def agg_updates(self, weights):
        """Average the client updates in ``weights`` into the global model.

        No server-side clipping or noise: privacy is applied client-side.
        """
        with torch.no_grad():
            for k, v in self.get_model_state_dict().items():
                if self.agg:
                    skip = 'bn' in k
                else:
                    # Instance-norm models keep normalization layers and
                    # downsample statistics client-local.
                    skip = 'bn' in k or 'norm' in k or 'downsample.1' in k
                if skip:
                    continue
                summed_grad = torch.zeros_like(v)
                for client_weights in weights:
                    summed_grad += client_weights[k] - v
                value = v + summed_grad / self.sample_clients
                self.model.state_dict()[k].data.copy_(value.detach().clone())