diff --git a/tools/test.py b/tools/test.py
index a9d88b8074..e36eeeb3ea 100644
--- a/tools/test.py
+++ b/tools/test.py
@@ -49,6 +49,12 @@ def parse_args():
         '--gpu-collect',
         action='store_true',
         help='whether to use gpu to collect results.')
+    parser.add_argument(
+        '--gpu-id',
+        type=int,
+        default=0,
+        help='id of gpu to use '
+        '(only applicable to non-distributed testing)')
     parser.add_argument(
         '--tmpdir',
         help='tmp directory used for collecting results from multiple '
@@ -136,6 +142,8 @@ def main():
         cfg.model.pretrained = None
     cfg.data.test.test_mode = True

+    cfg.gpu_ids = [args.gpu_id]
+
     # init distributed env first, since logger depends on the dist info.
     if args.launcher == 'none':
         distributed = False
diff --git a/tools/train.py b/tools/train.py
index 81c7d854ea..258dd18e35 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -35,13 +35,19 @@ def parse_args():
     group_gpus.add_argument(
         '--gpus',
         type=int,
-        help='number of gpus to use '
+        help='(Deprecated, please use --gpu-id) number of gpus to use '
         '(only applicable to non-distributed training)')
     group_gpus.add_argument(
         '--gpu-ids',
         type=int,
         nargs='+',
-        help='ids of gpus to use '
+        help='(Deprecated, please use --gpu-id) ids of gpus to use '
+        '(only applicable to non-distributed training)')
+    group_gpus.add_argument(
+        '--gpu-id',
+        type=int,
+        default=0,
+        help='id of gpu to use '
         '(only applicable to non-distributed training)')
     parser.add_argument('--seed', type=int, default=None, help='random seed')
     parser.add_argument(
@@ -118,10 +124,20 @@ def main():
         cfg.load_from = args.load_from
     if args.resume_from is not None:
         cfg.resume_from = args.resume_from
+    if args.gpus is not None:
+        cfg.gpu_ids = range(1)
+        warnings.warn('`--gpus` is deprecated because we only support '
+                      'single GPU mode in non-distributed training. '
+                      'Use `gpus=1` now.')
     if args.gpu_ids is not None:
-        cfg.gpu_ids = args.gpu_ids
-    else:
-        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
+        cfg.gpu_ids = args.gpu_ids[0:1]
+        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
+                      'Because we only support single GPU mode in '
+                      'non-distributed training. Use the first GPU '
+                      'in `gpu_ids` now.')
+    if args.gpus is None and args.gpu_ids is None:
+        cfg.gpu_ids = [args.gpu_id]
+
     cfg.auto_resume = args.auto_resume

     # init distributed env first, since logger depends on the dist info.
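
For reference, here is a minimal standalone sketch of the flag precedence the train.py hunk establishes: `--gpus` and `--gpu-ids` keep working but are deprecated and collapse to a single device, while `--gpu-id` (default 0) applies only when neither deprecated flag is given. The `resolve_gpu_ids` helper and the plain dict standing in for the project's config object are illustrative inventions, not part of the patch:

import argparse
import warnings


def parse_args():
    parser = argparse.ArgumentParser(description='gpu flag resolution sketch')
    # Same mutually exclusive group as in the patched train.py, so the
    # three flags cannot be combined on one command line.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument('--gpus', type=int)
    group_gpus.add_argument('--gpu-ids', type=int, nargs='+')
    group_gpus.add_argument('--gpu-id', type=int, default=0)
    return parser.parse_args()


def resolve_gpu_ids(args):
    # `cfg` is a hypothetical stand-in for the config the real script populates.
    cfg = {}
    if args.gpus is not None:
        # Deprecated: any gpu count collapses to the single device 0.
        cfg['gpu_ids'] = range(1)
        warnings.warn('`--gpus` is deprecated; using a single GPU.')
    if args.gpu_ids is not None:
        # Deprecated: only the first listed id is kept.
        cfg['gpu_ids'] = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated; using its first id.')
    if args.gpus is None and args.gpu_ids is None:
        # New behaviour: one explicit device id, 0 unless overridden.
        cfg['gpu_ids'] = [args.gpu_id]
    return cfg


if __name__ == '__main__':
    print(resolve_gpu_ids(parse_args()))

Run with no flags this prints {'gpu_ids': [0]}; `--gpu-id 1` gives [1]; `--gpu-ids 3 5` warns and keeps [3]; and combining two of the flags trips argparse's mutual-exclusion error, matching the group_gpus setup in the patch.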