From ffcf6b7f30639b709a638df343e0f7516aeec4dc Mon Sep 17 00:00:00 2001 From: irvingzhang0512 Date: Sat, 19 Dec 2020 02:02:41 +0800 Subject: [PATCH 1/9] [Improvement] save best val ckpt --- mmaction/core/evaluation/eval_hooks.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/mmaction/core/evaluation/eval_hooks.py b/mmaction/core/evaluation/eval_hooks.py index 40fd5631f2..f710651451 100644 --- a/mmaction/core/evaluation/eval_hooks.py +++ b/mmaction/core/evaluation/eval_hooks.py @@ -1,4 +1,6 @@ +import os import os.path as osp +import shutil import warnings from math import inf @@ -28,6 +30,8 @@ class EpochEvalHook(Hook): interval (int): Evaluation interval (by epochs). Default: 1. save_best (bool): Whether to save best checkpoint during evaluation. Default: True. + best_ckpt_name (str): If not None, save best ckpt in work_dir with + this name. Default: None. key_indicator (str | None): Key indicator to measure the best checkpoint during evaluation when ``save_best`` is set to True. Options are the evaluation metrics to the test dataset. e.g., @@ -53,6 +57,7 @@ def __init__(self, start=None, interval=1, save_best=True, + best_ckpt_name=None, key_indicator='top1_acc', rule=None, **eval_kwargs): @@ -93,6 +98,7 @@ def __init__(self, self.start = start self.eval_kwargs = eval_kwargs self.save_best = save_best + self.best_ckpt_name = best_ckpt_name self.key_indicator = key_indicator self.rule = rule @@ -158,6 +164,12 @@ def after_train_epoch(self, runner): self.best_json['best_ckpt'] = current_ckpt_path self.best_json['key_indicator'] = self.key_indicator mmcv.dump(self.best_json, json_path) + if self.best_ckpt_name: + best_ckpt_path = os.path.join(runner.work_dir, + self.best_ckpt_name) + if os.path.isfile(best_ckpt_path): + os.remove(best_ckpt_path) + shutil.copyfile(current_ckpt_path, best_ckpt_path) def evaluate(self, runner, results): """Evaluate the results. @@ -197,6 +209,8 @@ class DistEpochEvalHook(EpochEvalHook): interval (int): Evaluation interval (by epochs). Default: 1. save_best (bool): Whether to save best checkpoint during evaluation. Default: True. + best_ckpt_name (str): If not None, save best ckpt in work_dir with + this name. Default: None. key_indicator (str | None): Key indicator to measure the best checkpoint during evaluation when ``save_best`` is set to True. Options are the evaluation metrics to the test dataset. 
e.g., @@ -221,6 +235,7 @@ def __init__(self, start=None, interval=1, save_best=True, + best_ckpt_name=None, key_indicator='top1_acc', rule=None, tmpdir=None, @@ -231,6 +246,7 @@ def __init__(self, start=start, interval=interval, save_best=save_best, + best_ckpt_name=None, key_indicator=key_indicator, rule=rule, **eval_kwargs) @@ -275,3 +291,9 @@ def after_train_epoch(self, runner): self.best_json['best_ckpt'] = current_ckpt_path self.best_json['key_indicator'] = self.key_indicator mmcv.dump(self.best_json, json_path) + if self.best_ckpt_name: + best_ckpt_path = os.path.join(runner.work_dir, + self.best_ckpt_name) + if os.path.isfile(best_ckpt_path): + os.remove(best_ckpt_path) + shutil.copyfile(current_ckpt_path, best_ckpt_path) From 22f2ddb17d9de9e4269f110143813599ab38e9d6 Mon Sep 17 00:00:00 2001 From: irvingzhang0512 Date: Sat, 19 Dec 2020 03:35:23 +0800 Subject: [PATCH 2/9] refactor and add unittest --- mmaction/core/evaluation/eval_hooks.py | 48 ++++++++++---------------- tests/test_runtime/test_eval_hook.py | 8 +++-- 2 files changed, 25 insertions(+), 31 deletions(-) diff --git a/mmaction/core/evaluation/eval_hooks.py b/mmaction/core/evaluation/eval_hooks.py index f710651451..f7f7ef35c0 100644 --- a/mmaction/core/evaluation/eval_hooks.py +++ b/mmaction/core/evaluation/eval_hooks.py @@ -138,6 +138,21 @@ def evaluation_flag(self, runner): return False return True + def _do_save_best(self, key_score, json_path, current_ckpt_path, runner): + if (self.save_best and self.compare_func(key_score, self.best_score)): + self.best_score = key_score + self.logger.info( + f'Now best checkpoint is epoch_{runner.epoch + 1}.pth') + self.best_json['best_score'] = self.best_score + self.best_json['best_ckpt'] = current_ckpt_path + self.best_json['key_indicator'] = self.key_indicator + mmcv.dump(self.best_json, json_path) + if self.best_ckpt_name: + best_ckpt_path = osp(runner.work_dir, self.best_ckpt_name) + if os.path.isfile(best_ckpt_path): + os.remove(best_ckpt_path) + shutil.copyfile(current_ckpt_path, best_ckpt_path) + def after_train_epoch(self, runner): """Called after every training epoch to evaluate the results.""" if not self.evaluation_flag(runner): @@ -156,20 +171,8 @@ def after_train_epoch(self, runner): from mmaction.apis import single_gpu_test results = single_gpu_test(runner.model, self.dataloader) key_score = self.evaluate(runner, results) - if (self.save_best and self.compare_func(key_score, self.best_score)): - self.best_score = key_score - self.logger.info( - f'Now best checkpoint is epoch_{runner.epoch + 1}.pth') - self.best_json['best_score'] = self.best_score - self.best_json['best_ckpt'] = current_ckpt_path - self.best_json['key_indicator'] = self.key_indicator - mmcv.dump(self.best_json, json_path) - if self.best_ckpt_name: - best_ckpt_path = os.path.join(runner.work_dir, - self.best_ckpt_name) - if os.path.isfile(best_ckpt_path): - os.remove(best_ckpt_path) - shutil.copyfile(current_ckpt_path, best_ckpt_path) + self._do_save_best(self, key_score, json_path, current_ckpt_path, + runner) def evaluate(self, runner, results): """Evaluate the results. 
@@ -282,18 +285,5 @@ def after_train_epoch(self, runner): if runner.rank == 0: print('\n') key_score = self.evaluate(runner, results) - if (self.save_best and key_score is not None - and self.compare_func(key_score, self.best_score)): - self.best_score = key_score - self.logger.info( - f'Now best checkpoint is epoch_{runner.epoch + 1}.pth') - self.best_json['best_score'] = self.best_score - self.best_json['best_ckpt'] = current_ckpt_path - self.best_json['key_indicator'] = self.key_indicator - mmcv.dump(self.best_json, json_path) - if self.best_ckpt_name: - best_ckpt_path = os.path.join(runner.work_dir, - self.best_ckpt_name) - if os.path.isfile(best_ckpt_path): - os.remove(best_ckpt_path) - shutil.copyfile(current_ckpt_path, best_ckpt_path) + self._do_save_best(self, key_score, json_path, current_ckpt_path, + runner) diff --git a/tests/test_runtime/test_eval_hook.py b/tests/test_runtime/test_eval_hook.py index af02be5cbb..221d1ff30f 100644 --- a/tests/test_runtime/test_eval_hook.py +++ b/tests/test_runtime/test_eval_hook.py @@ -247,8 +247,8 @@ def test_eval_hook(): assert best_json['key_indicator'] == 'acc' data_loader = DataLoader(EvalDataset(), batch_size=1) - eval_hook = EpochEvalHook(data_loader, key_indicator='acc') with tempfile.TemporaryDirectory() as tmpdir: + eval_hook = EpochEvalHook(data_loader, key_indicator='acc') logger = get_logger('test_eval') runner = EpochBasedRunner( model=model, @@ -270,7 +270,8 @@ def test_eval_hook(): resume_from = osp.join(tmpdir, 'latest.pth') loader = DataLoader(ExampleDataset(), batch_size=1) - eval_hook = EpochEvalHook(data_loader, key_indicator='acc') + eval_hook = EpochEvalHook( + data_loader, key_indicator='acc', best_ckpt_name='best.pth') runner = EpochBasedRunner( model=model, batch_processor=None, @@ -285,10 +286,13 @@ def test_eval_hook(): best_json_path = osp.join(tmpdir, 'best.json') best_json = mmcv.load(best_json_path) real_path = osp.join(tmpdir, 'epoch_4.pth') + best_ckpt_path = osp.join(tmpdir, 'best.pth') assert best_json['best_ckpt'] == osp.realpath(real_path) assert best_json['best_score'] == 7 assert best_json['key_indicator'] == 'acc' + import os + assert os.path.isfile(best_ckpt_path) @patch('mmaction.apis.single_gpu_test', MagicMock) From 9547da150d3b2c9184a90727250ce03bcdcf5422 Mon Sep 17 00:00:00 2001 From: irvingzhang0512 Date: Sat, 19 Dec 2020 03:55:24 +0800 Subject: [PATCH 3/9] fix unittest --- mmaction/core/evaluation/eval_hooks.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mmaction/core/evaluation/eval_hooks.py b/mmaction/core/evaluation/eval_hooks.py index f7f7ef35c0..5009768d78 100644 --- a/mmaction/core/evaluation/eval_hooks.py +++ b/mmaction/core/evaluation/eval_hooks.py @@ -171,8 +171,7 @@ def after_train_epoch(self, runner): from mmaction.apis import single_gpu_test results = single_gpu_test(runner.model, self.dataloader) key_score = self.evaluate(runner, results) - self._do_save_best(self, key_score, json_path, current_ckpt_path, - runner) + self._do_save_best(key_score, json_path, current_ckpt_path, runner) def evaluate(self, runner, results): """Evaluate the results. 
@@ -285,5 +284,4 @@ def after_train_epoch(self, runner): if runner.rank == 0: print('\n') key_score = self.evaluate(runner, results) - self._do_save_best(self, key_score, json_path, current_ckpt_path, - runner) + self._do_save_best(key_score, json_path, current_ckpt_path, runner) From 1aa60ea2d8d5e60aaf795ba9362bbc716420ab28 Mon Sep 17 00:00:00 2001 From: irving Date: Sat, 19 Dec 2020 13:46:45 +0800 Subject: [PATCH 4/9] Merge branch 'master' into best-ckpt --- mmaction/models/builder.py | 2 +- mmaction/models/heads/roi_head.py | 2 +- mmaction/models/roi_extractors/single_straight3d.py | 6 +++++- tools/data/activitynet/download_videos.sh | 1 + tools/data/gym/environment.yml | 2 +- 5 files changed, 9 insertions(+), 4 deletions(-) diff --git a/mmaction/models/builder.py b/mmaction/models/builder.py index df95a94d40..f7316bd69a 100644 --- a/mmaction/models/builder.py +++ b/mmaction/models/builder.py @@ -13,7 +13,7 @@ # Define an empty registry and building func, so that can import DETECTORS = Registry('detector') - def bulid_detector(cfg, train_cfg, test_cfg): + def build_detector(cfg, train_cfg, test_cfg): pass diff --git a/mmaction/models/heads/roi_head.py b/mmaction/models/heads/roi_head.py index f94aa85a6a..45a36df528 100644 --- a/mmaction/models/heads/roi_head.py +++ b/mmaction/models/heads/roi_head.py @@ -85,5 +85,5 @@ def simple_test_bboxes(self, return det_bboxes, det_labels else: # Just define an empty class, so that __init__ can import it. - class AVARoIHead(StandardRoIHead): + class AVARoIHead: pass diff --git a/mmaction/models/roi_extractors/single_straight3d.py b/mmaction/models/roi_extractors/single_straight3d.py index 09d40b1dcf..aafceeaf1d 100644 --- a/mmaction/models/roi_extractors/single_straight3d.py +++ b/mmaction/models/roi_extractors/single_straight3d.py @@ -2,7 +2,11 @@ import torch import torch.nn as nn -from mmcv.ops import RoIAlign, RoIPool + +try: + from mmcv.ops import RoIAlign, RoIPool +except (ImportError, ModuleNotFoundError): + warnings.warn('Please install mmcv-full to use RoIAlign and RoIPool') try: import mmdet # noqa diff --git a/tools/data/activitynet/download_videos.sh b/tools/data/activitynet/download_videos.sh index 602cf48fcd..5d10a1017d 100644 --- a/tools/data/activitynet/download_videos.sh +++ b/tools/data/activitynet/download_videos.sh @@ -4,6 +4,7 @@ conda env create -f environment.yml source activate activitynet pip install --upgrade youtube-dl +pip install mmcv DATA_DIR="../../../data/ActivityNet" python download.py diff --git a/tools/data/gym/environment.yml b/tools/data/gym/environment.yml index 86e7e1a24c..b9ecc82678 100644 --- a/tools/data/gym/environment.yml +++ b/tools/data/gym/environment.yml @@ -1,4 +1,4 @@ -name: kinetics +name: gym channels: - anaconda - menpo From c83af6a10d1ef7423256eb8c96a8e44cdd9f1c73 Mon Sep 17 00:00:00 2001 From: irving Date: Sat, 19 Dec 2020 14:11:05 +0800 Subject: [PATCH 5/9] fix bug --- mmaction/core/evaluation/eval_hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmaction/core/evaluation/eval_hooks.py b/mmaction/core/evaluation/eval_hooks.py index 5009768d78..cca9888a9d 100644 --- a/mmaction/core/evaluation/eval_hooks.py +++ b/mmaction/core/evaluation/eval_hooks.py @@ -148,8 +148,8 @@ def _do_save_best(self, key_score, json_path, current_ckpt_path, runner): self.best_json['key_indicator'] = self.key_indicator mmcv.dump(self.best_json, json_path) if self.best_ckpt_name: - best_ckpt_path = osp(runner.work_dir, self.best_ckpt_name) - if os.path.isfile(best_ckpt_path): + best_ckpt_path 
= osp.join(runner.work_dir, self.best_ckpt_name) + if osp.isfile(best_ckpt_path): os.remove(best_ckpt_path) shutil.copyfile(current_ckpt_path, best_ckpt_path) From 6f32ec2c6ec4626e1388222afb6ea04fc88be74c Mon Sep 17 00:00:00 2001 From: irving Date: Sat, 19 Dec 2020 14:23:43 +0800 Subject: [PATCH 6/9] use `save_checkpoint` to save ckpt. --- mmaction/core/evaluation/eval_hooks.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/mmaction/core/evaluation/eval_hooks.py b/mmaction/core/evaluation/eval_hooks.py index cca9888a9d..4086418a8c 100644 --- a/mmaction/core/evaluation/eval_hooks.py +++ b/mmaction/core/evaluation/eval_hooks.py @@ -148,10 +148,7 @@ def _do_save_best(self, key_score, json_path, current_ckpt_path, runner): self.best_json['key_indicator'] = self.key_indicator mmcv.dump(self.best_json, json_path) if self.best_ckpt_name: - best_ckpt_path = osp.join(runner.work_dir, self.best_ckpt_name) - if osp.isfile(best_ckpt_path): - os.remove(best_ckpt_path) - shutil.copyfile(current_ckpt_path, best_ckpt_path) + runner.save_checkpoint(runner.work_dir, self.best_ckpt_name) def after_train_epoch(self, runner): """Called after every training epoch to evaluate the results.""" From 26babc57bbb4506bc0b221c2967075a0411ad880 Mon Sep 17 00:00:00 2001 From: irving Date: Sat, 19 Dec 2020 17:09:32 +0800 Subject: [PATCH 7/9] rename ckpt to --- mmaction/core/evaluation/eval_hooks.py | 34 +++++++++++++++++--------- tests/test_runtime/test_eval_hook.py | 6 ++--- 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/mmaction/core/evaluation/eval_hooks.py b/mmaction/core/evaluation/eval_hooks.py index 4086418a8c..b4bc037efb 100644 --- a/mmaction/core/evaluation/eval_hooks.py +++ b/mmaction/core/evaluation/eval_hooks.py @@ -1,6 +1,5 @@ import os import os.path as osp -import shutil import warnings from math import inf @@ -30,8 +29,8 @@ class EpochEvalHook(Hook): interval (int): Evaluation interval (by epochs). Default: 1. save_best (bool): Whether to save best checkpoint during evaluation. Default: True. - best_ckpt_name (str): If not None, save best ckpt in work_dir with - this name. Default: None. + save_best_ckpt (bool): Whether to save best checkpoint in work_dir. + Default: None. key_indicator (str | None): Key indicator to measure the best checkpoint during evaluation when ``save_best`` is set to True. Options are the evaluation metrics to the test dataset. 
e.g., @@ -57,7 +56,7 @@ def __init__(self, start=None, interval=1, save_best=True, - best_ckpt_name=None, + save_best_ckpt=False, key_indicator='top1_acc', rule=None, **eval_kwargs): @@ -98,7 +97,8 @@ def __init__(self, self.start = start self.eval_kwargs = eval_kwargs self.save_best = save_best - self.best_ckpt_name = best_ckpt_name + self.save_best_ckpt = save_best_ckpt + self._cur_best_ckpt_path = None self.key_indicator = key_indicator self.rule = rule @@ -147,8 +147,20 @@ def _do_save_best(self, key_score, json_path, current_ckpt_path, runner): self.best_json['best_ckpt'] = current_ckpt_path self.best_json['key_indicator'] = self.key_indicator mmcv.dump(self.best_json, json_path) - if self.best_ckpt_name: - runner.save_checkpoint(runner.work_dir, self.best_ckpt_name) + + if self.save_best_ckpt: + # remove previous best ckpt + if self._cur_best_ckpt_path and \ + osp.isfile(self._cur_best_ckpt_path): + os.remove(self._cur_best_ckpt_path) + + # save current checkpoint in work_dir + # checkpoint name 'best_{best_score}_{epoch_id}.pth' + cur_best_ckpt_name = 'best_{:.4f}_{}.pth'.format( + key_score, runner.epoch + 1) + runner.save_checkpoint(runner.work_dir, cur_best_ckpt_name) + self._cur_best_ckpt_path = osp.join(runner.work_dir, + cur_best_ckpt_name) def after_train_epoch(self, runner): """Called after every training epoch to evaluate the results.""" @@ -208,8 +220,8 @@ class DistEpochEvalHook(EpochEvalHook): interval (int): Evaluation interval (by epochs). Default: 1. save_best (bool): Whether to save best checkpoint during evaluation. Default: True. - best_ckpt_name (str): If not None, save best ckpt in work_dir with - this name. Default: None. + save_best_ckpt (bool): Whether to save best checkpoint in work_dir. + Default: None. key_indicator (str | None): Key indicator to measure the best checkpoint during evaluation when ``save_best`` is set to True. Options are the evaluation metrics to the test dataset. 
e.g., @@ -234,7 +246,7 @@ def __init__(self, start=None, interval=1, save_best=True, - best_ckpt_name=None, + save_best_ckpt=False, key_indicator='top1_acc', rule=None, tmpdir=None, @@ -245,7 +257,7 @@ def __init__(self, start=start, interval=interval, save_best=save_best, - best_ckpt_name=None, + save_best_ckpt=save_best_ckpt, key_indicator=key_indicator, rule=rule, **eval_kwargs) diff --git a/tests/test_runtime/test_eval_hook.py b/tests/test_runtime/test_eval_hook.py index 221d1ff30f..e65191c62a 100644 --- a/tests/test_runtime/test_eval_hook.py +++ b/tests/test_runtime/test_eval_hook.py @@ -271,7 +271,7 @@ def test_eval_hook(): resume_from = osp.join(tmpdir, 'latest.pth') loader = DataLoader(ExampleDataset(), batch_size=1) eval_hook = EpochEvalHook( - data_loader, key_indicator='acc', best_ckpt_name='best.pth') + data_loader, key_indicator='acc', save_best_ckpt=True) runner = EpochBasedRunner( model=model, batch_processor=None, @@ -286,13 +286,11 @@ def test_eval_hook(): best_json_path = osp.join(tmpdir, 'best.json') best_json = mmcv.load(best_json_path) real_path = osp.join(tmpdir, 'epoch_4.pth') - best_ckpt_path = osp.join(tmpdir, 'best.pth') assert best_json['best_ckpt'] == osp.realpath(real_path) assert best_json['best_score'] == 7 assert best_json['key_indicator'] == 'acc' - import os - assert os.path.isfile(best_ckpt_path) + assert osp.isfile(osp.join(tmpdir, 'best_7.0000_4.pth')) @patch('mmaction.apis.single_gpu_test', MagicMock) From ffc7a59f6ff00dbb3bda254102e6ef81d4ed2dd2 Mon Sep 17 00:00:00 2001 From: irving Date: Sat, 19 Dec 2020 17:18:09 +0800 Subject: [PATCH 8/9] update changelog --- docs/changelog.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/changelog.md b/docs/changelog.md index 3e56586252..c439314a2f 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -15,6 +15,7 @@ - Add markdown lint in pre-commit hook ([#255](https://github.com/open-mmlab/mmaction2/pull/225)) - Use title case in modelzoo statistics. ([#456](https://github.com/open-mmlab/mmaction2/pull/456)) - Add FAQ documents for easy troubleshooting. ([#413](https://github.com/open-mmlab/mmaction2/pull/413), [#420](https://github.com/open-mmlab/mmaction2/pull/420), [#439](https://github.com/open-mmlab/mmaction2/pull/439)) +- Save best checkpoint during training. ([#464](https://github.com/open-mmlab/mmaction2/pull/464)) **Bug and Typo Fixes** From 4500f7fb521bf91db9dcd1b44ec3d32e11bedee2 Mon Sep 17 00:00:00 2001 From: irving Date: Sat, 19 Dec 2020 17:19:16 +0800 Subject: [PATCH 9/9] Merge branch 'master' into best-ckpt --- docs/changelog.md | 5 +++-- tools/test.py | 17 +++++++++++------ 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/docs/changelog.md b/docs/changelog.md index c439314a2f..069461bdd5 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -13,13 +13,14 @@ - Support training and testing for Spatio-Temporal Action Detection ([#351](https://github.com/open-mmlab/mmaction2/pull/351)) - Fix CI due to pip upgrade ([#454](https://github.com/open-mmlab/mmaction2/pull/454)) - Add markdown lint in pre-commit hook ([#255](https://github.com/open-mmlab/mmaction2/pull/225)) -- Use title case in modelzoo statistics. ([#456](https://github.com/open-mmlab/mmaction2/pull/456)) +- Use title case in modelzoo statistics ([#456](https://github.com/open-mmlab/mmaction2/pull/456)) - Add FAQ documents for easy troubleshooting. 
([#413](https://github.com/open-mmlab/mmaction2/pull/413), [#420](https://github.com/open-mmlab/mmaction2/pull/420), [#439](https://github.com/open-mmlab/mmaction2/pull/439)) - Save best checkpoint during training. ([#464](https://github.com/open-mmlab/mmaction2/pull/464)) **Bug and Typo Fixes** -- Fix typo in default argument of BaseHead. ([#446](https://github.com/open-mmlab/mmaction2/pull/446)) +- Fix typo in default argument of BaseHead ([#446](https://github.com/open-mmlab/mmaction2/pull/446)) +- Fix potential bug about `output_config` overwrite ([#463](https://github.com/open-mmlab/mmaction2/pull/463)) **ModelZoo** diff --git a/tools/test.py b/tools/test.py index 86315d78f5..b50f8de9cb 100644 --- a/tools/test.py +++ b/tools/test.py @@ -102,15 +102,20 @@ def main(): # Load output_config from cfg output_config = cfg.get('output_config', {}) - # Overwrite output_config from args.out - output_config = Config._merge_a_into_b(dict(out=args.out), output_config) + if args.out: + # Overwrite output_config from args.out + output_config = Config._merge_a_into_b( + dict(out=args.out), output_config) # Load eval_config from cfg eval_config = cfg.get('eval_config', {}) - # Overwrite eval_config from args.eval - eval_config = Config._merge_a_into_b(dict(metrics=args.eval), eval_config) - # Add options from args.eval_options - eval_config = Config._merge_a_into_b(args.eval_options, eval_config) + if args.eval: + # Overwrite eval_config from args.eval + eval_config = Config._merge_a_into_b( + dict(metrics=args.eval), eval_config) + if args.eval_options: + # Add options from args.eval_options + eval_config = Config._merge_a_into_b(args.eval_options, eval_config) assert output_config or eval_config, \ ('Please specify at least one operation (save or eval the '
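A minimal usage sketch of the hook these patches end up with, mirroring the unit test in tests/test_runtime/test_eval_hook.py: with `save_best_ckpt=True` the hook writes `best.json` and keeps exactly one `best_<score>_<epoch>.pth` in the work directory, deleting the previous best before saving the new one via `runner.save_checkpoint`. This is an illustrative sketch, not part of the patch series; the `ExampleModel` import and the `register_checkpoint_hook(dict(interval=1))` call are assumptions standing in for the test's toy fixtures and for whatever produces the per-epoch `epoch_{n}.pth` checkpoints in a real training setup.

# Hedged sketch: exercise EpochEvalHook(save_best_ckpt=True) end to end.
# EvalDataset/ExampleModel are assumed test fixtures, not a public API.
import os.path as osp
import tempfile

import mmcv
import torch
from mmcv.runner import EpochBasedRunner
from mmcv.utils import get_logger
from torch.utils.data import DataLoader

from mmaction.core.evaluation.eval_hooks import EpochEvalHook
from tests.test_runtime.test_eval_hook import EvalDataset, ExampleModel

model = ExampleModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
loader = DataLoader(EvalDataset(), batch_size=1)

with tempfile.TemporaryDirectory() as tmpdir:
    runner = EpochBasedRunner(
        model=model,
        batch_processor=None,
        optimizer=optimizer,
        work_dir=tmpdir,
        logger=get_logger('eval_hook_demo'))
    # epoch_{n}.pth should exist before the eval hook records it in best.json,
    # so register a CheckpointHook that saves every epoch and runs first.
    runner.register_checkpoint_hook(dict(interval=1))
    runner.register_hook(
        EpochEvalHook(loader, key_indicator='acc', save_best_ckpt=True))
    runner.run([loader], [('train', 1)], 4)

    best = mmcv.load(osp.join(tmpdir, 'best.json'))
    # best.json records best_score, best_ckpt and key_indicator; the single
    # best_{score:.4f}_{epoch}.pth file sits next to it in work_dir.
    print(best['best_score'], best['best_ckpt'])

The switch in patches 6 and 7 from copying `epoch_{n}.pth` to calling `runner.save_checkpoint`, together with the `best_{score:.4f}_{epoch}.pth` naming and removal of the previous best, means the work directory never accumulates stale best checkpoints and the file name alone identifies the epoch and score it corresponds to.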