From f964577810a75889e7d59c5931fd98aea35d95fd Mon Sep 17 00:00:00 2001
From: "yuexiang.xyx"
Date: Fri, 12 Aug 2022 16:08:03 +0800
Subject: [PATCH] bugfix for scripts

---
 README.md                                     |  4 +--
 federatedscope/core/workers/server.py         |  2 +-
 scripts/README.md                             | 30 +++++++++++--------
 .../privacy_attack/PIA_toy.yaml               |  2 +-
 .../distributed_scripts/run_distributed_lr.sh |  2 +-
 scripts/example_configs/asyn_cifar10.yaml     |  2 +-
 6 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/README.md b/README.md
index 1a4518d5a..60ecbbe2a 100644
--- a/README.md
+++ b/README.md
@@ -163,7 +163,7 @@ Here we demonstrate how to run a standard FL task with FederatedScope, with sett
 # Run with default configurations
 python federatedscope/main.py --cfg scripts/example_configs/femnist.yaml
 # Or with custom configurations
-python federatedscope/main.py --cfg scripts/example_configs/femnist.yaml federated.total_round_num 50 data.batch_size 128
+python federatedscope/main.py --cfg scripts/example_configs/femnist.yaml federate.total_round_num 50 data.batch_size 128
 ```
 Then you can observe some monitored metrics during the training process as:
@@ -214,7 +214,7 @@ python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_conf
-An executable example with generated toy data can be run with (a script can be found in `scripts/run_distributed_lr.sh`):
+An executable example with generated toy data can be run with (a script can be found in `scripts/distributed_scripts/run_distributed_lr.sh`):
 ```bash
 # Generate the toy data
-python scripts/gen_data.py
+python scripts/distributed_scripts/gen_data.py
 # Firstly start the server that is waiting for clients to join in
 python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_server.yaml distribute.data_file toy_data/server_data distribute.server_host 127.0.0.1 distribute.server_port 50051

diff --git a/federatedscope/core/workers/server.py b/federatedscope/core/workers/server.py
index 54301e46e..38ceb6435 100644
--- a/federatedscope/core/workers/server.py
+++ b/federatedscope/core/workers/server.py
@@ -572,7 +572,7 @@ def merge_eval_results_from_all_clients(self):
                             float(client_eval_results[key]))
                 formatted_logs = self._monitor.format_eval_res(
                     metrics_all_clients,
-                    rnd=self.state,
+                    rnd=round,
                     role='Server #',
                     forms=self._cfg.eval.report)
                 if merge_type == "unseen":

diff --git a/scripts/README.md b/scripts/README.md
index 8a1d427c4..4da62f7c4 100644
--- a/scripts/README.md
+++ b/scripts/README.md
@@ -5,7 +5,7 @@ We greatly appreciate any [contribution](https://federatedscope.io/docs/contribu
 - [Distribute Mode](#distribute-mode)
 - [Asynchronous Training Strategy](#asynchronous-training-strategy)
 - [Graph Federated Learning](#graph-federated-learning)
-- [Attacks in Federated Learning](#attacks-in-FL)
+- [Attacks in Federated Learning](#attacks-in-federated-learning)
 
 ### Distribute Mode
 Users can train an LR on generated toy data with distribute mode via:
@@ -50,11 +50,13 @@ python federatedscope/main.py --cfg federatedscope/cv/baseline/fedbn_convnet2_on
 We provide an example for training ConvNet on CIFAR-10 with asynchronous training strategies:
 ```shell script
 cd ..
-python federatedscope/main.py --cfg scritpes/example_configs/asyn_cifar10.yaml
+python federatedscope/main.py --cfg scripts/example_configs/asyn_cifar10.yaml
 ```
-The FL courses consists of 1 server and 200 clients, which applies `goal_achieved` strategies and set the `min_received_num=10` and `staleness_toleration=10`. Users can change the configurations related to asynchronous training for customization.
+The FL course consists of 1 server and 200 clients; it applies the `goal_achieved` strategy and sets `min_received_num=10` and `staleness_toleration=10`. Users can change the configurations related to asynchronous training for customization.
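+For example, both thresholds can be overridden from the command line (a minimal sketch; we assume these keys live under the `asyn` namespace, as configured in `asyn_cifar10.yaml`):
+```shell script
+# Aggregate once 20 updates arrive, tolerating updates up to 5 rounds stale
+python federatedscope/main.py --cfg scripts/example_configs/asyn_cifar10.yaml asyn.min_received_num 20 asyn.staleness_toleration 5
+```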
 Please see [configurations](https://github.com/alibaba/FederatedScope/tree/master/federatedscope/core/configs).
+Note that users can manually download the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset and place it under `FederatedScope/data` if the automatic download fails. For `resource_info_file`, we take the [client_device_capacity](https://github.com/SymbioticLab/FedScale/blob/master/benchmark/dataset/data/device_info/client_device_capacity) provided by [1] as an example.
+
 ### Graph Federated Learning
 Please refer to [gfl](https://github.com/alibaba/FederatedScope/tree/master/federatedscope/gfl) for more details.
 
@@ -65,37 +67,41 @@ We provide the following four examples to run the membership inference attack, p
 Membership inference attack:
 
-Run the attack in [1]:
+Run the attack in [2]:
 ```shell script
 python federatedscope/main.py --cfg scripts/attack_exp_scripts/privacy_attack/gradient_ascent_MIA_on_femnist.yaml
 ```
 
-Property inference attack: Run the BPC [1] attack
+Property inference attack: Run the BPC attack [2]:
 ```shell script
 python federatedscope/main.py --cfg scripts/attack_exp_scripts/privacy_attack/PIA_toy.yaml
 ```
 
-Class representative attack: Run DCGAN [2] attack
+Class representative attack: Run the DCGAN attack [3]:
 ```shell script
 python federatedscope/main.py --cfg scripts/attack_exp_scripts/privacy_attack/CRA_fedavg_convnet2_on_femnist.yaml
 ```
 
-Training data/label inference attack: Run the DLG [3] attack
+Training data/label inference attack: Run the DLG attack [4]:
 ```shell script
 python federatedscope/main.py --cfg scripts/attack_exp_scripts/privacy_attack/reconstruct_fedavg_opt_on_femnist.yaml
 ```
 
-[1] Nasr, Milad, R. Shokri and Amir Houmansadr. “Comprehensive Privacy Analysis of Deep Learning: Stand-alone and Federated Learning under Passive and Active White-box Inference Attacks.” ArXiv abs/1812.00910 (2018): n. pag.
-
-[2] Hitaj, Briland, Giuseppe Ateniese, and Fernando Perez-Cruz. "Deep models under the GAN: information leakage from collaborative deep learning." Proceedings of the 2017 ACM SIGSAC conference on computer and communications security. 2017
-
-[3] Zhu, Ligeng, Zhijian Liu, and Song Han. "Deep leakage from gradients." Advances in Neural Information Processing Systems 32 (2019).
 
 #### Backdoor Attacks
 
-Run the BadNet attack:
+Run the BadNets [5] attack:
 ```shell script
 python federatedscope/main.py --cfg scripts/attack_exp_scripts/backdoor_attack/backdoor_badnet_fedavg_convnet2_on_femnist.yaml
 ```
+
+### References
+[1] Lai, Fan, et al. "FedScale: Benchmarking Model and System Performance of Federated Learning at Scale." International Conference on Machine Learning, PMLR, 2022, pp. 11814-11827.
+
+[2] Nasr, Milad, Reza Shokri, and Amir Houmansadr. "Comprehensive Privacy Analysis of Deep Learning: Stand-alone and Federated Learning under Passive and Active White-box Inference Attacks." arXiv abs/1812.00910 (2018).
+
+[3] Hitaj, Briland, Giuseppe Ateniese, and Fernando Perez-Cruz. "Deep Models under the GAN: Information Leakage from Collaborative Deep Learning." Proceedings of the 2017 ACM SIGSAC Conference on Computer and Communications Security, 2017.
+
+[4] Zhu, Ligeng, Zhijian Liu, and Song Han. "Deep Leakage from Gradients." Advances in Neural Information Processing Systems 32 (2019).
+
+[5] Gu, Tianyu, Kang Liu, Brendan Dolan-Gavitt, and Siddharth Garg. "BadNets: Evaluating Backdooring Attacks on Deep Neural Networks." IEEE Access 7 (2019): 47230-47244.
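+
+As with the other examples above, the attack configs accept command-line overrides; a minimal sketch (assuming the top-level `use_gpu` and `device` keys defined in `PIA_toy.yaml`):
+```shell script
+# Re-run the property inference attack on GPU without editing the yaml
+python federatedscope/main.py --cfg scripts/attack_exp_scripts/privacy_attack/PIA_toy.yaml use_gpu True device 0
+```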
diff --git a/scripts/attack_exp_scripts/privacy_attack/PIA_toy.yaml b/scripts/attack_exp_scripts/privacy_attack/PIA_toy.yaml index 7e33d2f6c..fb36f4b63 100644 --- a/scripts/attack_exp_scripts/privacy_attack/PIA_toy.yaml +++ b/scripts/attack_exp_scripts/privacy_attack/PIA_toy.yaml @@ -1,4 +1,4 @@ -use_gpu: True +use_gpu: False device: 0 seed: 12345 federate: diff --git a/scripts/distributed_scripts/run_distributed_lr.sh b/scripts/distributed_scripts/run_distributed_lr.sh index 361ebfcc4..efce4cb4b 100755 --- a/scripts/distributed_scripts/run_distributed_lr.sh +++ b/scripts/distributed_scripts/run_distributed_lr.sh @@ -5,7 +5,7 @@ cd .. echo "Test distributed mode with LR..." echo "Data generation" -python scripts/gen_data.py +python scripts/distributed_scripts/gen_data.py ### server owns global test data python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_server.yaml & diff --git a/scripts/example_configs/asyn_cifar10.yaml b/scripts/example_configs/asyn_cifar10.yaml index 9fa19a7e6..01d5150de 100644 --- a/scripts/example_configs/asyn_cifar10.yaml +++ b/scripts/example_configs/asyn_cifar10.yaml @@ -15,7 +15,7 @@ federate: data: root: data/ type: CIFAR10@torchvision - args: [{'download': False}] + args: [{'download': True}] splits: [0.8,0.2,0.2] batch_size: 10 subsample: 0.2
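
Note: with `args: [{'download': True}]`, torchvision fetches CIFAR-10 into `data/` on first use. If that automatic download fails (see the note added to `scripts/README.md` above), the dataset can be pre-fetched manually; a minimal sketch using torchvision's standard `CIFAR10` constructor:

```shell script
python -c "from torchvision.datasets import CIFAR10; CIFAR10(root='data/', download=True)"
```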