-
Notifications
You must be signed in to change notification settings - Fork 214
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add condition param to autotune; Add feature engineering module #426
Changes from 50 commits
617a0ae
2610730
df3d5ad
70086de
0ab58fd
1fe2d6e
1991c91
7cd522a
d8ce865
71211d9
85f2d8c
0f03ecb
62abf7d
e4e4537
6c8a78d
bd99a09
68511ff
51b45cd
c068d14
040f8b3
7e81d81
5ddee3b
426e392
0274fea
cac7aca
c07184b
4c1fb67
f9f16d1
5c19466
4155814
de458ca
703b4c5
572ca7e
9d62f02
445f157
db04d5d
78c0f82
e347954
a0287d4
0c68059
9b7b4d8
ac4177d
f747b4c
ef3004f
9f6621b
e8d154b
446975d
06d6ab6
5b8a72e
42a3e51
47c256a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,35 @@ | ||
# CI workflow: run the autotune-module smoke test on every push and PR.
name: UnitTests for Autotune Module

on: [push, pull_request]

jobs:
  run:
    runs-on: ${{ matrix.os }}
    timeout-minutes: 20
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: ['3.9']
        torch-version: ['1.10.1']
        torchvision-version: ['0.11.2']
        torchaudio-version: ['0.10.1']
    env:
      OS: ${{ matrix.os }}
      PYTHON: '3.9'
    steps:
      - uses: actions/checkout@master
      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@master
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install PyTorch ${{ matrix.torch-version }}+cpu
        run: |
          pip install numpy typing-extensions dataclasses
          pip install torch==${{ matrix.torch-version }}+cpu torchvision==${{ matrix.torchvision-version }}+cpu torchaudio==${{ matrix.torchaudio-version }}+cpu -f https://download.pytorch.org/whl/torch_stable.html
      - name: Install FS
        run: |
          # Editable install with the test/hpo extras declared in setup.py.
          pip install -e .[test,hpo]
      - name: Test Autotune
        run: |
          # A non-zero exit status fails the step automatically under the
          # default `bash -e` shell, so no manual `$?` check is needed.
          # The previous `[ $? -eq 1 ] && exit 1 || echo "Passed"` trailer
          # only caught exit code 1 and was unreachable on failure.
          python federatedscope/hpo.py --cfg federatedscope/autotune/baseline/fedhpo_vfl.yaml
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,54 @@ | ||
# Baseline config for HPO over a standalone vertical-FL (vFL) run on the
# `adult` dataset with two clients and a logistic-regression model.
use_gpu: False
device: 0
backend: torch
outdir: vFL_adult
federate:
  mode: standalone
  client_num: 2
  total_round_num: 30
model:
  type: lr
  use_bias: False
train:
  optimizer:
    lr: 0.5
    # Tree/binning settings below are searched by the autotune conditions in
    # vfl_ss.yaml; presumably consumed by the xgb-style vertical trainer
    # rather than the plain lr path -- TODO confirm against the trainer code.
    bin_num: 100
    lambda_: 0.1
    gamma: 0
    num_of_trees: 5
    max_tree_depth: 3
xgb_base:
  use: True
  use_bin: False
data:
  root: data/
  type: adult
  splits: [1.0, 0.0]
  args: [{normalization: False, standardization: True}]
feat_engr:
  scenario: vfl
dataloader:
  type: raw
  batch_size: 50
criterion:
  type: CrossEntropyLoss
trainer:
  type: none
vertical_dims: [7, 14]
vertical:
  use: False
  key_size: 256
eval:
  freq: 5
  best_res_update_round_wise_key: test_loss
hpo:
  scheduler: sha
  num_workers: 0
  init_cand_num: 9
  # Search-space definition consumed by the scheduler.
  ss: 'federatedscope/autotune/baseline/vfl_ss.yaml'
  sha:
    budgets: [ 3, 9 ]
    elim_rate: 3
    iter: 1
  metric: 'server_global_eval.test_loss'
  working_folder: sha
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
# Hyperparameter search space for the vFL autotune baseline.
# Each top-level key (except the `conditionN` entries) names a config field
# to search; `conditionN` entries restrict when a child hyperparameter is
# active -- presumably the child is only sampled when the parent equals
# `value` (NOTE(review): verify against the autotune condition parsing).
train.optimizer.lr:
  type: float
  lower: 0.01
  upper: 1.0
  log: True          # sample on a log scale
train.optimizer.num_of_trees:
  type: int
  lower: 3
  upper: 5
vertical.use:
  type: cate
  choices: [True, False]
feat_engr.type:
  # '' disables feature engineering.
  type: cate
  choices: ['', 'min_max_norm', 'instance_norm', 'standardization', 'log_transform', 'uniform_binning', 'quantile_binning', 'correlation_filter', 'variance_filter', 'iv_filter']
condition1:
  # num_of_trees is only relevant when vertical.use == False.
  type: equal
  child: train.optimizer.num_of_trees
  parent: vertical.use
  value: False
condition2:
  # lr is only relevant when vertical.use == True.
  type: equal
  child: train.optimizer.lr
  parent: vertical.use
  value: True
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,14 @@ | ||
def run_scheduler(scheduler, cfg, client_cfgs=None):
    """Dispatch an instantiated HPO scheduler to the matching backend.

    Args:
        scheduler: the instantiated scheduler object. For the ``sha``
            family its ``optimize()`` method is called directly; otherwise
            it is handed to the corresponding third-party backend runner.
        cfg: the global configuration; ``cfg.hpo.scheduler`` selects which
            backend to run.
        client_cfgs: optional client-specific configurations, forwarded
            unchanged to the backend runners. Defaults to ``None``.

    Raises:
        ValueError: if ``cfg.hpo.scheduler`` does not name a known
            scheduler.
    """
    if cfg.hpo.scheduler in ['sha', 'wrap_sha']:
        _ = scheduler.optimize()
    elif cfg.hpo.scheduler in [
            'rs', 'bo_kde', 'hb', 'bohb', 'wrap_rs', 'wrap_bo_kde',
            'wrap_hb', 'wrap_bohb'
    ]:
        # Imported lazily so hpbandster is only required when selected.
        from federatedscope.autotune.hpbandster import run_hpbandster
        run_hpbandster(cfg, scheduler, client_cfgs)
    elif cfg.hpo.scheduler in ['bo_gp', 'bo_rf', 'wrap_bo_gp', 'wrap_bo_rf']:
        # Imported lazily so SMAC is only required when selected.
        from federatedscope.autotune.smac import run_smac
        run_smac(cfg, scheduler, client_cfgs)
    else:
        raise ValueError(f'No scheduler named {cfg.hpo.scheduler}')
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,7 +1,7 @@ | ||
import logging | ||
import numpy as np | ||
import ConfigSpace as CS | ||
from federatedscope.autotune.utils import eval_in_fs | ||
from federatedscope.autotune.utils import eval_in_fs, log2wandb | ||
from smac.facade.smac_bb_facade import SMAC4BB | ||
from smac.facade.smac_hpo_facade import SMAC4HPO | ||
from smac.scenario.scenario import Scenario | ||
|
@@ -16,21 +16,26 @@ def run_smac(cfg, scheduler, client_cfgs=None): | |
|
||
def optimization_function_wrapper(config): | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer: please supplement a docstring for this method. It seems that understanding what its input is would largely help figure out what this method does. |
||
budget = cfg.hpo.sha.budgets[-1] | ||
res = eval_in_fs(cfg, config, budget, client_cfgs) | ||
results = eval_in_fs(cfg, config, budget, client_cfgs) | ||
key1, key2 = cfg.hpo.metric.split('.') | ||
res = results[key1][key2] | ||
config = dict(config) | ||
config['federate.total_round_num'] = budget | ||
init_configs.append(config) | ||
perfs.append(res) | ||
logger.info(f'Evaluate the {len(perfs)-1}-th config ' | ||
f'{config}, and get performance {res}') | ||
if cfg.wandb.use: | ||
log2wandb(len(perfs) - 1, config, results, cfg) | ||
return res | ||
|
||
def summarize(): | ||
from federatedscope.autotune.utils import summarize_hpo_results | ||
results = summarize_hpo_results(init_configs, | ||
perfs, | ||
white_list=set(config_space.keys()), | ||
desc=cfg.hpo.larger_better) | ||
desc=cfg.hpo.larger_better, | ||
use_wandb=cfg.wandb.use) | ||
logger.info( | ||
"========================== HPO Final ==========================") | ||
logger.info("\n{}".format(results)) | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
we'd better provide UT cases to cover this module.