[Fix] Fix UT and remove delete mmcv ops. (open-mmlab#8623)
* Remove get_root_logger

* Fix UT

* Update
jbwang1997 authored and BIGWangYuDong committed Aug 26, 2022
1 parent 444654a commit ceae85b
Showing 34 changed files with 63 additions and 93 deletions.
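
The change set has two themes. First, call sites that used mmdet's removed get_root_logger helper now fetch the global logger from mmengine. A minimal sketch of that pattern, assuming only that mmengine is installed (the wrapper function below is illustrative, not part of this diff):

# Before (removed in this commit):
#     from mmdet.utils import get_root_logger
#     logger = get_root_logger()
# After: ask mmengine for the current global logger instance.
from mmengine.logging import MMLogger


def log_init_weights():
    # get_current_instance() returns the most recently created MMLogger,
    # or lazily creates a default logger named 'mmengine' if none exists.
    logger = MMLogger.get_current_instance()
    logger.info('Initializing weights from pretrained checkpoint')

Second, the loss and box-coder modules drop their @mmcv.jit decorators together with the now-unused import mmcv lines. The individual file diffs follow; removed lines are marked with - and added lines with +.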
1 change: 0 additions & 1 deletion docs/en/tutorials/customize_runtime.md
@@ -84,7 +84,6 @@ The users can do those fine-grained parameter tuning through customizing optimiz
from mmcv.utils import build_from_cfg

from mmcv.runner.optimizer import OPTIMIZER_BUILDERS, OPTIMIZERS
-from mmdet.utils import get_root_logger
from .my_optimizer import MyOptimizer


4 changes: 2 additions & 2 deletions mmdet/engine/optimizers/layer_decay_optimizer_constructor.py
@@ -4,10 +4,10 @@

import torch.nn as nn
from mmengine.dist import get_dist_info
+from mmengine.logging import MMLogger
from mmengine.optim import DefaultOptimWrapperConstructor

from mmdet.registry import OPTIM_WRAPPER_CONSTRUCTORS
-from mmdet.utils import get_root_logger


def get_layer_id_for_convnext(var_name, max_layer_id):
@@ -95,7 +95,7 @@ def add_params(self, params: List[dict], module: nn.Module,
in place.
module (nn.Module): The module to be added.
"""
-logger = get_root_logger()
+logger = MMLogger.get_current_instance()

parameter_groups = {}
logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}')
4 changes: 2 additions & 2 deletions mmdet/models/backbones/detectors_resnet.py
@@ -2,13 +2,13 @@
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
+from mmengine.logging import MMLogger
from mmengine.model import Sequential
from mmengine.model.utils import constant_init, kaiming_init
from mmengine.runner.checkpoint import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.registry import MODELS
-from mmdet.utils import get_root_logger
from .resnet import BasicBlock
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
@@ -299,7 +299,7 @@ def init_weights(self):
# super(DetectoRS_ResNet, self).init_weights()

if isinstance(self.pretrained, str):
-logger = get_root_logger()
+logger = MMLogger.get_current_instance()
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
4 changes: 2 additions & 2 deletions mmdet/models/backbones/pvt.py
@@ -10,14 +10,14 @@
from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer
from mmcv.cnn.bricks.drop import build_dropout
from mmcv.cnn.bricks.transformer import MultiheadAttention
+from mmengine.logging import MMLogger
from mmengine.model import BaseModule, ModuleList, Sequential
from mmengine.model.utils import (constant_init, normal_init, trunc_normal_,
trunc_normal_init)
from mmengine.runner.checkpoint import CheckpointLoader, load_state_dict
from torch.nn.modules.utils import _pair as to_2tuple

from mmdet.registry import MODELS
-from ...utils import get_root_logger
from ..layers import PatchEmbed, nchw_to_nlc, nlc_to_nchw


@@ -522,7 +522,7 @@ def __init__(self,
cur += num_layer

def init_weights(self):
-logger = get_root_logger()
+logger = MMLogger.get_current_instance()
if self.init_cfg is None:
logger.warn(f'No pre-trained weights for '
f'{self.__class__.__name__}, '
4 changes: 2 additions & 2 deletions mmdet/models/backbones/swin.py
@@ -9,14 +9,14 @@
import torch.utils.checkpoint as cp
from mmcv.cnn import build_norm_layer
from mmcv.cnn.bricks.transformer import FFN, build_dropout
+from mmengine.logging import MMLogger
from mmengine.model import BaseModule, ModuleList
from mmengine.model.utils import (constant_init, trunc_normal_,
trunc_normal_init)
from mmengine.runner.checkpoint import CheckpointLoader
from mmengine.utils import to_2tuple

from mmdet.registry import MODELS
-from ...utils import get_root_logger
from ..layers import PatchEmbed, PatchMerging


@@ -669,7 +669,7 @@ def _freeze_stages(self):
param.requires_grad = False

def init_weights(self):
-logger = get_root_logger()
+logger = MMLogger.get_current_instance()
if self.init_cfg is None:
logger.warn(f'No pre-trained weights for '
f'{self.__class__.__name__}, '
2 changes: 0 additions & 2 deletions mmdet/models/losses/accuracy.py
@@ -1,9 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
import torch.nn as nn


-@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
2 changes: 0 additions & 2 deletions mmdet/models/losses/ae_loss.py
@@ -1,13 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F

from mmdet.registry import MODELS


-@mmcv.jit(derivate=True, coderize=True)
def ae_loss_per_image(tl_preds, br_preds, match):
"""Associative Embedding Loss in one image.
2 changes: 0 additions & 2 deletions mmdet/models/losses/balanced_l1_loss.py
@@ -1,5 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
import numpy as np
import torch
import torch.nn as nn
@@ -8,7 +7,6 @@
from .utils import weighted_loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
target,
3 changes: 0 additions & 3 deletions mmdet/models/losses/gfocal_loss.py
@@ -1,13 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
import torch.nn as nn
import torch.nn.functional as F

from mmdet.registry import MODELS
from .utils import weighted_loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def quality_focal_loss(pred, target, beta=2.0):
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
@@ -98,7 +96,6 @@ def quality_focal_loss_with_prob(pred, target, beta=2.0):
return loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def distribution_focal_loss(pred, label):
r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
6 changes: 0 additions & 6 deletions mmdet/models/losses/iou_loss.py
@@ -2,7 +2,6 @@
import math
import warnings

-import mmcv
import torch
import torch.nn as nn

@@ -11,7 +10,6 @@
from .utils import weighted_loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
"""IoU loss.
@@ -50,7 +48,6 @@ def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
return loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
"""BIoULoss.
@@ -97,7 +94,6 @@ def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
return loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def giou_loss(pred, target, eps=1e-7):
r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
@@ -117,7 +113,6 @@ def giou_loss(pred, target, eps=1e-7):
return loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def diou_loss(pred, target, eps=1e-7):
r"""`Implementation of Distance-IoU Loss: Faster and Better
@@ -172,7 +167,6 @@ def diou_loss(pred, target, eps=1e-7):
return loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def ciou_loss(pred, target, eps=1e-7):
r"""`Implementation of paper `Enhancing Geometric Factors into
2 changes: 0 additions & 2 deletions mmdet/models/losses/kd_loss.py
@@ -1,13 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
import torch.nn as nn
import torch.nn.functional as F

from mmdet.registry import MODELS
from .utils import weighted_loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
soft_label,
3 changes: 0 additions & 3 deletions mmdet/models/losses/pisa_loss.py
@@ -1,7 +1,6 @@
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple

-import mmcv
import torch
import torch.nn as nn
from torch import Tensor
@@ -11,7 +10,6 @@
from ..task_modules.samplers import SamplingResult


-@mmcv.jit(derivate=True, coderize=True)
def isr_p(cls_score: Tensor,
bbox_pred: Tensor,
bbox_targets: Tuple[Tensor],
@@ -125,7 +123,6 @@ def isr_p(cls_score: Tensor,
return bbox_targets


-@mmcv.jit(derivate=True, coderize=True)
def carl_loss(cls_score: Tensor,
labels: Tensor,
bbox_pred: Tensor,
3 changes: 0 additions & 3 deletions mmdet/models/losses/smooth_l1_loss.py
@@ -1,13 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
import torch
import torch.nn as nn

from mmdet.registry import MODELS
from .utils import weighted_loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
@@ -32,7 +30,6 @@ def smooth_l1_loss(pred, target, beta=1.0):
return loss


-@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
2 changes: 0 additions & 2 deletions mmdet/models/losses/utils.py
@@ -1,7 +1,6 @@
# Copyright (c) OpenMMLab. All rights reserved.
import functools

-import mmcv
import torch
import torch.nn.functional as F

@@ -26,7 +25,6 @@ def reduce_loss(loss, reduction):
return loss.sum()


-@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
2 changes: 0 additions & 2 deletions mmdet/models/losses/varifocal_loss.py
@@ -1,13 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
import torch.nn as nn
import torch.nn.functional as F

from mmdet.registry import MODELS
from .utils import weight_reduce_loss


-@mmcv.jit(derivate=True, coderize=True)
def varifocal_loss(pred,
target,
weight=None,
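
Every loss edit above follows the same recipe: delete the @mmcv.jit(...) decorator and the import mmcv it needed, and leave the PyTorch implementation untouched (the decorator is an optional JIT hook, so its removal is not expected to change numerical behaviour). A self-contained sketch of what such a loss looks like after the cleanup; the weighted_loss wrapper here is a simplified stand-in for mmdet's own utility, not a copy of it:

import functools

import torch


def weighted_loss(loss_func):
    # Simplified stand-in for mmdet.models.losses.utils.weighted_loss:
    # apply an optional element-wise weight, then reduce by mean.
    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        if weight is not None:
            loss = loss * weight
        return loss.mean()

    return wrapper


@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    # Same element-wise smooth L1 formula as above, just without @mmcv.jit.
    diff = torch.abs(pred - target)
    return torch.where(diff < beta, 0.5 * diff * diff / beta,
                       diff - 0.5 * beta)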
4 changes: 0 additions & 4 deletions mmdet/models/task_modules/coders/bucketing_bbox_coder.py
@@ -1,5 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
import numpy as np
import torch
import torch.nn.functional as F
@@ -92,7 +91,6 @@ def decode(self, bboxes, pred_bboxes, max_shape=None):
return decoded_bboxes


-@mmcv.jit(coderize=True)
def generat_buckets(proposals, num_buckets, scale_factor=1.0):
"""Generate buckets w.r.t bucket number and scale factor of proposals.
@@ -141,7 +139,6 @@ def generat_buckets(proposals, num_buckets, scale_factor=1.0):
return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets


-@mmcv.jit(coderize=True)
def bbox2bucket(proposals,
gt,
num_buckets,
@@ -265,7 +262,6 @@ def bbox2bucket(proposals,
return offsets, offsets_weights, bucket_labels, bucket_cls_weights


-@mmcv.jit(coderize=True)
def bucket2bbox(proposals,
cls_preds,
offset_preds,
3 changes: 0 additions & 3 deletions mmdet/models/task_modules/coders/delta_xywh_bbox_coder.py
@@ -1,7 +1,6 @@
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

-import mmcv
import numpy as np
import torch

@@ -114,7 +113,6 @@ def decode(self,
return decoded_bboxes


-@mmcv.jit(coderize=True)
def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
"""Compute deltas of proposals w.r.t. gt.
@@ -160,7 +158,6 @@ def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
return deltas


-@mmcv.jit(coderize=True)
def delta2bbox(rois,
deltas,
means=(0., 0., 0., 0.),
3 changes: 0 additions & 3 deletions mmdet/models/task_modules/coders/legacy_delta_xywh_bbox_coder.py
@@ -1,5 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
import numpy as np
import torch

@@ -81,7 +80,6 @@ def decode(self,
return decoded_bboxes


-@mmcv.jit(coderize=True)
def legacy_bbox2delta(proposals,
gt,
means=(0., 0., 0., 0.),
@@ -130,7 +128,6 @@ def legacy_bbox2delta(proposals,
return deltas


-@mmcv.jit(coderize=True)
def legacy_delta2bbox(rois,
deltas,
means=(0., 0., 0., 0.),
3 changes: 0 additions & 3 deletions mmdet/models/task_modules/coders/tblr_bbox_coder.py
@@ -1,5 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
import torch

from mmdet.registry import TASK_UTILS
@@ -73,7 +72,6 @@ def decode(self, bboxes, pred_bboxes, max_shape=None):
return decoded_bboxes


-@mmcv.jit(coderize=True)
def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):
"""Encode ground truth boxes to tblr coordinate.
@@ -120,7 +118,6 @@ def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):
return loc / normalizer


-@mmcv.jit(coderize=True)
def tblr2bboxes(priors,
tblr,
normalizer=4.0,
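
The box-coder files get the same treatment: only @mmcv.jit(coderize=True) and import mmcv go away, while the encoding math in bbox2delta, delta2bbox, bboxes2tblr and friends is unchanged. For reference, an illustrative re-derivation of the delta encoding these coders compute (a sketch under the usual (x1, y1, x2, y2) box convention, ignoring the mean/std normalisation the real functions also apply; the function name is hypothetical):

import torch


def bbox2delta_sketch(proposals, gt):
    # Centre and size of the proposals.
    px = (proposals[..., 0] + proposals[..., 2]) * 0.5
    py = (proposals[..., 1] + proposals[..., 3]) * 0.5
    pw = proposals[..., 2] - proposals[..., 0]
    ph = proposals[..., 3] - proposals[..., 1]
    # Centre and size of the ground-truth boxes.
    gx = (gt[..., 0] + gt[..., 2]) * 0.5
    gy = (gt[..., 1] + gt[..., 3]) * 0.5
    gw = gt[..., 2] - gt[..., 0]
    gh = gt[..., 3] - gt[..., 1]
    # Offsets are relative to the proposal size; scale factors are log-encoded.
    dx = (gx - px) / pw
    dy = (gy - py) / ph
    dw = torch.log(gw / pw)
    dh = torch.log(gh / ph)
    return torch.stack([dx, dy, dw, dh], dim=-1)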
(Diffs for the remaining changed files are not shown here.)

0 comments on commit ceae85b
