Commit

polish
dreamerlin committed Jan 9, 2021
1 parent acf46cd commit 0fbaf09
Showing 1 changed file with 19 additions and 11 deletions.
30 changes: 19 additions & 11 deletions mmcv/runner/hooks/eval.py
@@ -11,11 +11,14 @@

class EvalHook(Hook):
"""Non-Distributed evaluation hook.
Notes:
If new arguments are added for EvalHook, tools/test.py,
tools/eval_metric.py may be effected.
This hook will regularly perform evaluation in a given interval when
performing in non-distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
start (int | None, optional): Evaluation starting epoch. It enables
@@ -32,8 +35,9 @@ class EvalHook(Hook):
Options are the evaluation metrics to the test dataset. e.g.,
``bbox_mAP``, ``segm_mAP`` for bbox detection and instance
segmentation. ``AR@100`` for proposal recall. If ``save_best`` is
``auto``, the first key will be used. The interval of
``CheckpointHook`` should device EvalHook. Default: None.
``auto``, the first key of the returned ``OrderedDict`` result
will be used. The interval of ``CheckpointHook`` should depend on
``EvalHook``. Default: None.
rule (str | None, optional): Comparison rule for best score. If set to
None, it will infer a reasonable rule. Keys such as 'acc', 'top',
etc. will be inferred by the 'greater' rule. Keys containing 'loss' will
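
Taken together, the arguments documented above translate into a registration call like the hedged sketch below. The `runner` and `val_dataloader` objects are placeholders for your own setup, and the import path simply mirrors the file touched by this commit; adjust it to however your installation exposes the hook.

```python
from mmcv.runner.hooks.eval import EvalHook  # module path as in this commit


def register_eval(runner, val_dataloader):
    """Attach an EvalHook that evaluates every epoch and tracks the best
    checkpoint by the first metric key ('auto')."""
    eval_hook = EvalHook(
        val_dataloader,      # dataloader (DataLoader): validation data
        start=None,          # no delayed start: evaluate from the first epoch
        interval=1,          # evaluate every epoch
        save_best='auto',    # track the first key of the returned OrderedDict
        rule=None)           # comparison rule inferred from the metric name
    runner.register_hook(eval_hook)
```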
@@ -78,7 +82,7 @@ def __init__(self,
assert isinstance(save_best, str) or save_best is None
self.save_best = save_best
self.eval_kwargs = eval_kwargs
self.initial_epoch_flag = True
self.initial_flag = True

if self.save_best is not None:
self._init_rule(rule, self.save_best)
@@ -121,21 +125,21 @@ def before_train_iter(self, runner):
"""Evaluate the model only at the start of training by iteration."""
if self.by_epoch:
return
if not self.initial_epoch_flag:
if not self.initial_flag:
return
if self.start is not None and runner.iter >= self.start:
self.after_train_iter(runner)
self.initial_epoch_flag = False
self.initial_flag = False

def before_train_epoch(self, runner):
"""Evaluate the model only at the start of training by epoch."""
if not self.by_epoch:
return
if not self.initial_epoch_flag:
if not self.initial_flag:
return
if self.start is not None and runner.epoch >= self.start:
self.after_train_epoch(runner)
self.initial_epoch_flag = False
self.initial_flag = False

def after_train_iter(self, runner):
"""Called after every training iter to evaluate the results."""
@@ -178,7 +182,8 @@ def evaluation_flag(self, runner):
# No evaluation if start is larger than the current time.
return False
else:
# Evaluation only at epochs 3, 5, 7... if start==3 and interval==2
# Evaluation only at epochs/iters 3, 5, 7...
# if start==3 and interval==2
if (current + 1 - self.start) % self.interval:
return False
return True
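
The updated comment can be verified with a few lines: the standalone sketch below reproduces the `(current + 1 - start) % interval` arithmetic (illustrative only, not the mmcv source) and prints the epochs at which evaluation fires for `start=3`, `interval=2`.

```python
# Standalone sketch of the interval check in evaluation_flag (illustrative).
start, interval = 3, 2


def should_evaluate(current):
    """`current` is the 0-based epoch/iter index used inside the hook."""
    if current + 1 < start:
        return False  # no evaluation before `start`
    return (current + 1 - start) % interval == 0


# Displayed epochs are `current + 1`, so this prints [3, 5, 7, 9].
print([current + 1 for current in range(10) if should_evaluate(current)])
```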
@@ -187,7 +192,7 @@ def _save_ckpt(self, runner, key_score):
if self.by_epoch:
current = f'epoch_{runner.epoch + 1}'
else:
current = f'iter_{runner.epoch + 1}'
current = f'iter_{runner.iter + 1}'

best_score = runner.meta['hook_msgs'].get(
'best_score', self.init_value_map[self.rule])
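
For context, the best-checkpoint bookkeeping around these lines boils down to comparing the new score against the stored best under the chosen rule. The sketch below is illustrative: the plain dict stands in for `runner.meta['hook_msgs']`, and the `best_ckpt` key name is an assumption made for the example, not necessarily the key mmcv uses.

```python
# Illustrative sketch of the best-score comparison in _save_ckpt.
from math import inf

rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
init_value_map = {'greater': -inf, 'less': inf}


def update_best(hook_msgs, key_score, rule, current):
    """`hook_msgs` stands in for runner.meta['hook_msgs']; `current` is a
    tag such as 'epoch_12' or 'iter_8000' (naming as in the diff above)."""
    best_score = hook_msgs.get('best_score', init_value_map[rule])
    if rule_map[rule](key_score, best_score):
        hook_msgs['best_score'] = key_score
        hook_msgs['best_ckpt'] = current  # assumed key name, for illustration
    return hook_msgs


msgs = {}
update_best(msgs, 0.71, 'greater', 'epoch_3')
update_best(msgs, 0.68, 'greater', 'epoch_4')  # worse score, epoch_3 is kept
print(msgs)  # {'best_score': 0.71, 'best_ckpt': 'epoch_3'}
```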
Expand Down Expand Up @@ -226,8 +231,10 @@ def evaluate(self, runner, results):

class DistEvalHook(EvalHook):
"""Distributed evaluation hook.
This hook will regularly perform evaluation at a given interval when
running in a distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
start (int | None, optional): Evaluation starting epoch. It enables
@@ -244,8 +251,9 @@ class DistEvalHook(EvalHook):
Options are the evaluation metrics to the test dataset. e.g.,
``bbox_mAP``, ``segm_mAP`` for bbox detection and instance
segmentation. ``AR@100`` for proposal recall. If ``save_best`` is
``auto``, the first key will be used. The interval of
``CheckpointHook`` should device EvalHook. Default: None.
``auto``, the first key of the returned ``OrderedDict`` result
will be used. The interval of ``CheckpointHook`` should depend on
``EvalHook``. Default: None.
rule (str | None, optional): Comparison rule for best score. If set to
None, it will infer a reasonable rule. Keys such as 'acc', 'top',
etc. will be inferred by the 'greater' rule. Keys containing 'loss' will
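
From the user's side, registering the distributed variant looks the same as the EvalHook example earlier; a brief hedged sketch (placeholders again, and only parameters from the docstring above):

```python
from mmcv.runner.hooks.eval import DistEvalHook  # module path as in this commit


def register_dist_eval(runner, val_dataloader):
    """Attach a DistEvalHook; evaluation runs at the given interval in the
    distributed setting, mirroring the EvalHook sketch above."""
    runner.register_hook(
        DistEvalHook(
            val_dataloader,
            start=None,
            interval=1,
            save_best='auto',
            rule=None))
```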
