#!/usr/bin/env python
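"""Entry point for the distributed optimization experiments.

Builds a local model, hands it to a central Server, and runs one of the
supported algorithms (DONE, GD, DANE, Newton, GT, PGT), optionally logging
hyper-parameters and metrics to Comet.ml.
"""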
from comet_ml import Experiment  # comet_ml must be imported before torch

import argparse

import torch

from algorithms.server.server import Server
from algorithms.trainmodel.models import *
from utils.plot_utils import *

# Fix the seed so repeated runs are reproducible.
torch.manual_seed(0)
def main(experiment, dataset, algorithm, model, batch_size, learning_rate, alpha, eta, L, rho,
         num_glob_iters, local_epochs, optimizer, numedges, times, commet, gpu):
    device = torch.device("cuda:{}".format(gpu) if torch.cuda.is_available() and gpu != -1 else "cpu")

    # Remember the model name so a fresh model can be built for every run.
    model_name = model

    for i in range(times):
        print("---------------Running time:------------", i)

        # Generate the model as a (model instance, model name) tuple; the
        # server uses the name for logging.
        if model_name == "mclr":
            if dataset == "human_activity":
                model = Mclr_Logistic(561, 6).to(device), model_name
            else:
                model = Mclr_Logistic().to(device), model_name
        elif model_name == "linear_regression":
            model = Linear_Regression(40, 1).to(device), model_name
        elif model_name == "logistic_regression":
            model = Logistic_Regression(40).to(device), model_name

        # Select the algorithm, then train and evaluate on the server.
        if commet:
            experiment.set_name(dataset + "_" + algorithm + "_" + model[1] + "_" + str(batch_size) + "b_"
                                + str(learning_rate) + "lr_" + str(alpha) + "al_" + str(eta) + "eta_"
                                + str(L) + "L_" + str(rho) + "p_" + str(num_glob_iters) + "ge_"
                                + str(local_epochs) + "le_" + str(numedges) + "u")

        server = Server(experiment, device, dataset, algorithm, model, batch_size, learning_rate,
                        alpha, eta, L, num_glob_iters, local_epochs, optimizer, numedges, i)
        server.train()
        server.test()

    # Average results over runs.
    # average_data(num_users=numedges, loc_ep1=local_epochs, Numb_Glob_Iters=num_glob_iters, lamb=L,
    #              learning_rate=learning_rate, alpha=alpha, algorithms=algorithm,
    #              batch_size=batch_size, dataset=dataset, times=times)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", type=str, default="human_activity",
                        choices=["Mnist", "Linear_synthetic", "Fashion_Mnist", "Cifar10", "human_activity", "Nist"])
    parser.add_argument("--model", type=str, default="mclr",
                        choices=["linear_regression", "mclr", "logistic_regression"])
    parser.add_argument("--batch_size", type=int, default=0)
    parser.add_argument("--learning_rate", type=float, default=1, help="Local learning rate for DANE and GD")
    parser.add_argument("--alpha", type=float, default=0.03,
                        help="Alpha for DONE and Newton, used in the Richardson iteration")
    parser.add_argument("--eta", type=float, default=1.0, help="Eta parameter for DANE")
    parser.add_argument("--L", type=int, default=0, help="Regularization term")
    parser.add_argument("--rho", type=int, default=0, help="Condition number")
    parser.add_argument("--num_global_iters", type=int, default=100)
    parser.add_argument("--local_epochs", type=int, default=20)
    parser.add_argument("--optimizer", type=str, default="SGD", choices=["SGD"])
    parser.add_argument("--algorithm", type=str, default="DANE",
                        choices=["DONE", "GD", "DANE", "Newton", "GT", "PGT"])
    parser.add_argument("--numedges", type=int, default=32, help="Number of edges per round")
    parser.add_argument("--times", type=int, default=1, help="Number of independent runs")
    parser.add_argument("--commet", type=int, default=1, help="Log data to Comet.ml")
    parser.add_argument("--gpu", type=int, default=0, help="Which GPU to run the experiments on")
    args = parser.parse_args()
print("=" * 80)
print("Summary of training process:")
print("Algorithm : {}".format(args.algorithm))
print("Batch size : {}".format(args.batch_size))
print("Learing rate : {}".format(args.learning_rate))
print("alpha : {}".format(args.alpha))
print("Subset of edges : {}".format(args.numedges))
print("Number of local rounds : {}".format(args.local_epochs))
print("Number of global rounds : {}".format(args.num_global_iters))
print("Dataset : {}".format(args.dataset))
print("Local Model : {}".format(args.model))
print("=" * 80)
    if args.commet:
        # Create a Comet experiment with your API key and log the hyper-parameters.
        experiment = Experiment(
            api_key="VtHmmkcG2ngy1isOwjkm5sHhP",
            project_name="done-supplement",
            workspace="federated-learning-exp",
        )
        hyper_params = {
            "dataset": args.dataset,
            "algorithm": args.algorithm,
            "model": args.model,
            "batch_size": args.batch_size,
            "learning_rate": args.learning_rate,
            "alpha": args.alpha,
            "L": args.L,
            "rho": args.rho,
            "num_glob_iters": args.num_global_iters,
            "local_epochs": args.local_epochs,
            "optimizer": args.optimizer,
            "numusers": args.numedges,
            "times": args.times,
            "gpu": args.gpu,
        }
        experiment.log_parameters(hyper_params)
    else:
        experiment = 0
    main(
        experiment=experiment,
        dataset=args.dataset,
        algorithm=args.algorithm,
        model=args.model,
        batch_size=args.batch_size,
        learning_rate=args.learning_rate,
        alpha=args.alpha,
        eta=args.eta,
        L=args.L,
        rho=args.rho,
        num_glob_iters=args.num_global_iters,
        local_epochs=args.local_epochs,
        optimizer=args.optimizer,
        numedges=args.numedges,
        times=args.times,
        commet=args.commet,
        gpu=args.gpu,
    )
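# Example invocation (a sketch using the flags defined above; assumes the
# chosen dataset has already been prepared under the repository's data layout):
#   python main.py --dataset human_activity --model mclr --algorithm DONE \
#       --num_global_iters 100 --local_epochs 20 --numedges 32 --commet 0 --gpu 0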