import os.path as osp
import warnings
from mmcv.runner import Hook
from torch.utils.data import DataLoader


class EvalHook(Hook):
"""Evaluation hook.
Notes:
If new arguments are added for EvalHook, tools/test.py may be
effected.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
start (int, optional): Evaluation starting epoch. It enables evaluation
before the training starts if ``start`` <= the resuming epoch.
If None, whether to evaluate is merely decided by ``interval``.
Default: None.
interval (int): Evaluation interval (by epochs). Default: 1.
**eval_kwargs: Evaluation arguments fed into the evaluate function of
the dataset.
"""

    def __init__(self, dataloader, start=None, interval=1, **eval_kwargs):
        if not isinstance(dataloader, DataLoader):
            raise TypeError('dataloader must be a pytorch DataLoader, but got'
                            f' {type(dataloader)}')
        if not interval > 0:
            raise ValueError(f'interval must be positive, but got {interval}')
        if start is not None and start < 0:
            warnings.warn(
                f'The evaluation start epoch {start} is smaller than 0, '
                f'use 0 instead', UserWarning)
            start = 0
        self.dataloader = dataloader
        self.interval = interval
        self.start = start
        self.eval_kwargs = eval_kwargs
        self.initial_epoch_flag = True

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training."""
        if not self.initial_epoch_flag:
            return
        if self.start is not None and runner.epoch >= self.start:
            self.after_train_epoch(runner)
        self.initial_epoch_flag = False

    def evaluation_flag(self, runner):
        """Judge whether to perform evaluation after this epoch.

        Returns:
            bool: The flag indicating whether to perform evaluation.
        """
        # The selection rule below is illustrated by the standalone sketch
        # after this class.
        if self.start is None:
            if not self.every_n_epochs(runner, self.interval):
                # No evaluation during the interval epochs.
                return False
        elif (runner.epoch + 1) < self.start:
            # No evaluation if start is larger than the current epoch.
            return False
        else:
            # Evaluation only at epochs 3, 5, 7... if start==3 and interval==2
            if (runner.epoch + 1 - self.start) % self.interval:
                return False
        return True

    def after_train_epoch(self, runner):
        if not self.evaluation_flag(runner):
            return
        # Lazy import so mmdet.apis is only needed when evaluation runs.
        from mmdet.apis import single_gpu_test
        results = single_gpu_test(runner.model, self.dataloader, show=False)
        self.evaluate(runner, results)

    def evaluate(self, runner, results):
        eval_res = self.dataloader.dataset.evaluate(
            results, logger=runner.logger, **self.eval_kwargs)
        # Push every metric into the log buffer so the logger hooks emit it.
        for name, val in eval_res.items():
            runner.log_buffer.output[name] = val
        runner.log_buffer.ready = True
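

# The epoch-selection rule in ``EvalHook.evaluation_flag`` is easy to
# misread, so here is a small self-contained sketch (an illustration added
# here, not part of the original hook; ``_demo_eval_epochs`` is a
# hypothetical helper name) that reproduces it outside the runner.
def _demo_eval_epochs(max_epochs, start=None, interval=1):
    """Return the 1-based epoch numbers at which evaluation would run."""
    selected = []
    for epoch in range(max_epochs):  # mirrors the 0-based runner.epoch
        if start is None:
            # Matches mmcv's Hook.every_n_epochs(runner, interval).
            run = (epoch + 1) % interval == 0
        elif (epoch + 1) < start:
            run = False
        else:
            run = (epoch + 1 - start) % interval == 0
        if run:
            selected.append(epoch + 1)
    return selected


# For example, _demo_eval_epochs(8, start=3, interval=2) == [3, 5, 7],
# matching the "epochs 3, 5, 7..." comment in evaluation_flag.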


class DistEvalHook(EvalHook):
    """Distributed evaluation hook.

    Notes:
        If new arguments are added, tools/test.py may be affected.

    Attributes:
        dataloader (DataLoader): A PyTorch dataloader.
        start (int, optional): Evaluation starting epoch. It enables evaluation
            before the training starts if ``start`` <= the resuming epoch.
            If None, whether to evaluate is merely decided by ``interval``.
            Default: None.
        interval (int): Evaluation interval (by epochs). Default: 1.
        tmpdir (str | None): Temporary directory to save the results of all
            processes. Default: None.
        gpu_collect (bool): Whether to use gpu or cpu to collect results.
            Default: False.
        **eval_kwargs: Evaluation arguments fed into the evaluate function of
            the dataset.
    """

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 tmpdir=None,
                 gpu_collect=False,
                 **eval_kwargs):
        super().__init__(
            dataloader, start=start, interval=interval, **eval_kwargs)
        self.tmpdir = tmpdir
        self.gpu_collect = gpu_collect

    def after_train_epoch(self, runner):
        if not self.evaluation_flag(runner):
            return
        # Lazy import so mmdet.apis is only needed when evaluation runs.
        from mmdet.apis import multi_gpu_test
        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        if runner.rank == 0:
            # Break the line after the progress bar printed during testing,
            # then evaluate on the rank-0 process only.
            print('\n')
            self.evaluate(runner, results)
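

# A minimal runnable sketch (an assumption added for illustration, not part
# of the original module): it exercises only EvalHook's constructor checks
# with a throwaway dataset. A real dataset would also need an
# ``evaluate(results, logger=...)`` method before the hook could be used in
# training, and DistEvalHook is registered the same way on a distributed
# runner.
if __name__ == '__main__':
    from torch.utils.data import Dataset

    class _DummyDataset(Dataset):
        """Tiny stand-in dataset used only to build a DataLoader."""

        def __len__(self):
            return 4

        def __getitem__(self, idx):
            return idx

    loader = DataLoader(_DummyDataset(), batch_size=2)
    # A negative start triggers the UserWarning and is clamped to 0.
    hook = EvalHook(loader, start=-1, interval=2)
    assert hook.start == 0 and hook.interval == 2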