From 4219abeb584d44b49c0ced74a3723b739bc4f01b Mon Sep 17 00:00:00 2001 From: NoFish-528 Date: Fri, 18 Nov 2022 11:08:05 +0800 Subject: [PATCH 1/7] [Fix]:add type hints for res_layer --- mmdet/models/layers/res_layer.py | 51 ++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/mmdet/models/layers/res_layer.py b/mmdet/models/layers/res_layer.py index 8c499a2d088..856782e38a9 100644 --- a/mmdet/models/layers/res_layer.py +++ b/mmdet/models/layers/res_layer.py @@ -1,8 +1,13 @@ # Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + from mmcv.cnn import build_conv_layer, build_norm_layer from mmengine.model import BaseModule, Sequential +from torch import Tensor from torch import nn as nn +from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig + class ResLayer(Sequential): """ResLayer to build ResNet style backbone. @@ -24,16 +29,16 @@ class ResLayer(Sequential): """ def __init__(self, - block, - inplanes, - planes, - num_blocks, - stride=1, - avg_down=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - downsample_first=True, - **kwargs): + block: BaseModule, + inplanes: int, + planes: int, + num_blocks: int, + stride: int = 1, + avg_down: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN'), + downsample_first: bool = True, + **kwargs) -> None: self.block = block downsample = None @@ -114,18 +119,18 @@ class SimplifiedBasicBlock(BaseModule): expansion = 1 def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - plugins=None, - init_fg=None): + inplanes: int, + planes: int, + stride: int = 1, + dilation: int = 1, + downsample: Optional[Sequential] = None, + style: ConfigType = 'pytorch', + with_cp: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN'), + dcn: OptConfigType = None, + plugins: OptConfigType = None, + init_fg: OptMultiConfig = None) -> None: super(SimplifiedBasicBlock, self).__init__(init_fg) assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' @@ -168,7 +173,7 @@ def norm2(self): """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) if self.with_norm else None - def forward(self, x): + def forward(self, x: Tensor) -> Tensor: """Forward function.""" identity = x From cff1bd662e58c7a2cbcee66d1b566bb8733d2be9 Mon Sep 17 00:00:00 2001 From: Zhikang Niu <73390819+NoFish-528@users.noreply.github.com> Date: Fri, 18 Nov 2022 14:16:40 +0800 Subject: [PATCH 2/7] Update mmdet/models/layers/res_layer.py Co-authored-by: BigDong --- mmdet/models/layers/res_layer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmdet/models/layers/res_layer.py b/mmdet/models/layers/res_layer.py index 856782e38a9..45396cd1fd3 100644 --- a/mmdet/models/layers/res_layer.py +++ b/mmdet/models/layers/res_layer.py @@ -131,7 +131,7 @@ def __init__(self, dcn: OptConfigType = None, plugins: OptConfigType = None, init_fg: OptMultiConfig = None) -> None: - super(SimplifiedBasicBlock, self).__init__(init_fg) + super().__init__(init_fg=init_fg) assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' assert not with_cp, 'Not implemented yet.' 
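Note: the annotated signatures above import ConfigType, OptConfigType and OptMultiConfig from mmdet.utils. For reference, a minimal, dependency-free sketch of what those aliases express; the real definitions live in mmdet/utils and also admit mmengine's ConfigDict, so the plain-dict stand-ins and the build_layer helper below are illustrative assumptions only, not the library's actual code:

from typing import Any, Dict, List, Optional, Union

# Stand-ins for the mmdet.utils typing aliases used throughout this series.
ConfigType = Dict[str, Any]                         # a required config dict
OptConfigType = Optional[ConfigType]                # a config dict or None
MultiConfig = Union[ConfigType, List[ConfigType]]   # one config or a list
OptMultiConfig = Optional[MultiConfig]              # dict, list of dicts, or None


def build_layer(norm_cfg: ConfigType = dict(type='BN'),
                conv_cfg: OptConfigType = None,
                init_cfg: OptMultiConfig = None) -> None:
    """Hypothetical helper mirroring the annotation pattern in the patches."""
    print(norm_cfg, conv_cfg, init_cfg)


build_layer()  # prints: {'type': 'BN'} None None

The split matches how the patches use the aliases: norm_cfg always has a concrete default dict (ConfigType), conv_cfg may be None (OptConfigType), and init_cfg may be a dict, a list of dicts, or None (OptMultiConfig).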
From 34c306625fc68b6bfa0536ad28ae8b5948f65965 Mon Sep 17 00:00:00 2001 From: Zhikang Niu <73390819+NoFish-528@users.noreply.github.com> Date: Fri, 18 Nov 2022 14:32:04 +0800 Subject: [PATCH 3/7] [Fix]:add type hint of norm1 and norm2 --- mmdet/models/layers/res_layer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmdet/models/layers/res_layer.py b/mmdet/models/layers/res_layer.py index 45396cd1fd3..3d6cbefdf4c 100644 --- a/mmdet/models/layers/res_layer.py +++ b/mmdet/models/layers/res_layer.py @@ -164,12 +164,12 @@ def __init__(self, self.with_cp = with_cp @property - def norm1(self): + def norm1(self) -> Optional[BaseModule]: """nn.Module: normalization layer after the first convolution layer""" return getattr(self, self.norm1_name) if self.with_norm else None @property - def norm2(self): + def norm2(self) -> Optional[BaseModule]: """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) if self.with_norm else None From 54c6a6fcb1cdb52fdc11b225012bbf6f2410a027 Mon Sep 17 00:00:00 2001 From: Zhikang Niu <73390819+NoFish-528@users.noreply.github.com> Date: Mon, 12 Dec 2022 10:47:27 +0800 Subject: [PATCH 4/7] [WIP]:add res_layer type hints --- mmdet/models/layers/res_layer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmdet/models/layers/res_layer.py b/mmdet/models/layers/res_layer.py index 3d6cbefdf4c..0e36504ddb1 100644 --- a/mmdet/models/layers/res_layer.py +++ b/mmdet/models/layers/res_layer.py @@ -106,7 +106,7 @@ def __init__(self, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) - super(ResLayer, self).__init__(*layers) + super().__init__(*layers) class SimplifiedBasicBlock(BaseModule): From 4dac59fd4a78015af72d6684ff333367bab19c8a Mon Sep 17 00:00:00 2001 From: NoFish-528 Date: Mon, 12 Dec 2022 11:13:55 +0800 Subject: [PATCH 5/7] [WIP]:add layer type hints about issue 9234 --- mmdet/models/layers/normed_predictor.py | 30 +++++++++------- mmdet/models/layers/positional_encoding.py | 40 ++++++++++++---------- mmdet/models/layers/se_layer.py | 2 +- 3 files changed, 41 insertions(+), 31 deletions(-) diff --git a/mmdet/models/layers/normed_predictor.py b/mmdet/models/layers/normed_predictor.py index 558d815e59b..41bab7c3a17 100644 --- a/mmdet/models/layers/normed_predictor.py +++ b/mmdet/models/layers/normed_predictor.py @@ -2,6 +2,7 @@ import torch import torch.nn as nn import torch.nn.functional as F +from torch import Tensor from mmdet.registry import MODELS @@ -19,19 +20,24 @@ class NormedLinear(nn.Linear): keep numerical stability. Default to 1e-6. """ - def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs): - super(NormedLinear, self).__init__(*args, **kwargs) + def __init__(self, + *args, + tempearture: float = 20, + power: int = 1.0, + eps: float = 1e-6, + **kwargs) -> None: + super().__init__(*args, **kwargs) self.tempearture = tempearture self.power = power self.eps = eps self.init_weights() - def init_weights(self): + def init_weights(self) -> None: nn.init.normal_(self.weight, mean=0, std=0.01) if self.bias is not None: nn.init.constant_(self.bias, 0) - def forward(self, x): + def forward(self, x) -> Tensor: weight_ = self.weight / ( self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps) x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) @@ -50,23 +56,23 @@ class NormedConv2d(nn.Conv2d): eps (float, optional): The minimal value of divisor to keep numerical stability. Default to 1e-6. 
norm_over_kernel (bool, optional): Normalize over kernel. - Default to False. + Default: False. """ def __init__(self, *args, - tempearture=20, - power=1.0, - eps=1e-6, - norm_over_kernel=False, - **kwargs): - super(NormedConv2d, self).__init__(*args, **kwargs) + tempearture: float = 20, + power: int = 1.0, + eps: float = 1e-6, + norm_over_kernel: bool = False, + **kwargs) -> None: + super().__init__(*args, **kwargs) self.tempearture = tempearture self.power = power self.norm_over_kernel = norm_over_kernel self.eps = eps - def forward(self, x): + def forward(self, x) -> Tensor: if not self.norm_over_kernel: weight_ = self.weight / ( self.weight.norm(dim=1, keepdim=True).pow(self.power) + diff --git a/mmdet/models/layers/positional_encoding.py b/mmdet/models/layers/positional_encoding.py index 859052f7f66..e7f8811cf27 100644 --- a/mmdet/models/layers/positional_encoding.py +++ b/mmdet/models/layers/positional_encoding.py @@ -4,8 +4,10 @@ import torch import torch.nn as nn from mmengine.model import BaseModule +from torch import Tensor from mmdet.registry import MODELS +from mmdet.utils import MultiConfig, OptMultiConfig @MODELS.register_module() @@ -35,14 +37,14 @@ class SinePositionalEncoding(BaseModule): """ def __init__(self, - num_feats, - temperature=10000, - normalize=False, - scale=2 * math.pi, - eps=1e-6, - offset=0., - init_cfg=None): - super(SinePositionalEncoding, self).__init__(init_cfg) + num_feats: int, + temperature: int = 10000, + normalize: bool = False, + scale: float = 2 * math.pi, + eps: float = 1e-6, + offset: float = 0., + init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg) if normalize: assert isinstance(scale, (float, int)), 'when normalize is set,' \ 'scale should be provided and in float or int type, ' \ @@ -54,7 +56,7 @@ def __init__(self, self.eps = eps self.offset = offset - def forward(self, mask): + def forward(self, mask) -> Tensor: """Forward function for `SinePositionalEncoding`. Args: @@ -93,7 +95,7 @@ def forward(self, mask): pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos - def __repr__(self): + def __repr__(self) -> str: """str: a string that describes the module""" repr_str = self.__class__.__name__ repr_str += f'(num_feats={self.num_feats}, ' @@ -119,19 +121,21 @@ class LearnedPositionalEncoding(BaseModule): init_cfg (dict or list[dict], optional): Initialization config dict. """ - def __init__(self, - num_feats, - row_num_embed=50, - col_num_embed=50, - init_cfg=dict(type='Uniform', layer='Embedding')): - super(LearnedPositionalEncoding, self).__init__(init_cfg) + def __init__( + self, + num_feats: int, + row_num_embed: int = 50, + col_num_embed: int = 50, + init_cfg: MultiConfig = dict(type='Uniform', layer='Embedding') + ) -> None: + super().__init__(init_cfg) self.row_embed = nn.Embedding(row_num_embed, num_feats) self.col_embed = nn.Embedding(col_num_embed, num_feats) self.num_feats = num_feats self.row_num_embed = row_num_embed self.col_num_embed = col_num_embed - def forward(self, mask): + def forward(self, mask) -> Tensor: """Forward function for `LearnedPositionalEncoding`. 
Args: @@ -155,7 +159,7 @@ def forward(self, mask): 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1) return pos - def __repr__(self): + def __repr__(self) -> str: """str: a string that describes the module""" repr_str = self.__class__.__name__ repr_str += f'(num_feats={self.num_feats}, ' diff --git a/mmdet/models/layers/se_layer.py b/mmdet/models/layers/se_layer.py index 5c4d453aa0c..7718d078a1c 100644 --- a/mmdet/models/layers/se_layer.py +++ b/mmdet/models/layers/se_layer.py @@ -140,7 +140,7 @@ class ChannelAttention(BaseModule): Args: channels (int): The input (and output) channels of the attention layer. init_cfg (dict or list[dict], optional): Initialization config dict. - Defaults to None. + Defaults: None """ def __init__(self, channels: int, init_cfg: OptMultiConfig = None) -> None: From beee50d2b046081fb81f45b76a86b37d05004adb Mon Sep 17 00:00:00 2001 From: NoFish-528 Date: Wed, 28 Dec 2022 20:19:48 +0800 Subject: [PATCH 6/7] [FIX]:add all type hints and change some function note --- mmdet/models/layers/normed_predictor.py | 6 +++--- mmdet/models/layers/positional_encoding.py | 14 +++++++------- mmdet/models/layers/res_layer.py | 10 +++++----- mmdet/models/layers/se_layer.py | 18 +++++++++--------- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/mmdet/models/layers/normed_predictor.py b/mmdet/models/layers/normed_predictor.py index 41bab7c3a17..6bf3618b9bf 100644 --- a/mmdet/models/layers/normed_predictor.py +++ b/mmdet/models/layers/normed_predictor.py @@ -37,7 +37,7 @@ def init_weights(self) -> None: if self.bias is not None: nn.init.constant_(self.bias, 0) - def forward(self, x) -> Tensor: + def forward(self, x: Tensor) -> Tensor: weight_ = self.weight / ( self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps) x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) @@ -56,7 +56,7 @@ class NormedConv2d(nn.Conv2d): eps (float, optional): The minimal value of divisor to keep numerical stability. Default to 1e-6. norm_over_kernel (bool, optional): Normalize over kernel. - Default: False. + Default to False. """ def __init__(self, @@ -72,7 +72,7 @@ def __init__(self, self.norm_over_kernel = norm_over_kernel self.eps = eps - def forward(self, x) -> Tensor: + def forward(self, x: Tensor) -> Tensor: if not self.norm_over_kernel: weight_ = self.weight / ( self.weight.norm(dim=1, keepdim=True).pow(self.power) + diff --git a/mmdet/models/layers/positional_encoding.py b/mmdet/models/layers/positional_encoding.py index e7f8811cf27..ff4c5d6076e 100644 --- a/mmdet/models/layers/positional_encoding.py +++ b/mmdet/models/layers/positional_encoding.py @@ -33,7 +33,7 @@ class SinePositionalEncoding(BaseModule): offset (float): offset add to embed when do the normalization. Defaults to 0. init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None + Default to None """ def __init__(self, @@ -44,7 +44,7 @@ def __init__(self, eps: float = 1e-6, offset: float = 0., init_cfg: OptMultiConfig = None) -> None: - super().__init__(init_cfg) + super().__init__(init_cfg=init_cfg) if normalize: assert isinstance(scale, (float, int)), 'when normalize is set,' \ 'scale should be provided and in float or int type, ' \ @@ -56,7 +56,7 @@ def __init__(self, self.eps = eps self.offset = offset - def forward(self, mask) -> Tensor: + def forward(self, mask: Tensor) -> Tensor: """Forward function for `SinePositionalEncoding`. Args: @@ -115,9 +115,9 @@ class LearnedPositionalEncoding(BaseModule): along x-axis or y-axis. 
The final returned dimension for each position is 2 times of this value. row_num_embed (int, optional): The dictionary size of row embeddings. - Default 50. + Default to 50. col_num_embed (int, optional): The dictionary size of col embeddings. - Default 50. + Default to 50. init_cfg (dict or list[dict], optional): Initialization config dict. """ @@ -128,14 +128,14 @@ def __init__( col_num_embed: int = 50, init_cfg: MultiConfig = dict(type='Uniform', layer='Embedding') ) -> None: - super().__init__(init_cfg) + super().__init__(init_cfg=init_cfg) self.row_embed = nn.Embedding(row_num_embed, num_feats) self.col_embed = nn.Embedding(col_num_embed, num_feats) self.num_feats = num_feats self.row_num_embed = row_num_embed self.col_num_embed = col_num_embed - def forward(self, mask) -> Tensor: + def forward(self, mask: Tensor) -> Tensor: """Forward function for `LearnedPositionalEncoding`. Args: diff --git a/mmdet/models/layers/res_layer.py b/mmdet/models/layers/res_layer.py index 0e36504ddb1..1ae8da7e049 100644 --- a/mmdet/models/layers/res_layer.py +++ b/mmdet/models/layers/res_layer.py @@ -17,15 +17,15 @@ class ResLayer(Sequential): inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. - stride (int): stride of the first block. Default: 1 + stride (int): stride of the first block. Default to 1 avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False + downsampling in the bottleneck. Default to False conv_cfg (dict): dictionary to construct and config conv layer. - Default: None + Default to None norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') + Default to dict(type='BN') downsample_first (bool): Downsample at the first block or last block. - False for Hourglass, True for ResNet. Default: True + False for Hourglass, True for ResNet. Default to True """ def __init__(self, diff --git a/mmdet/models/layers/se_layer.py b/mmdet/models/layers/se_layer.py index 7718d078a1c..a7f909a79ab 100644 --- a/mmdet/models/layers/se_layer.py +++ b/mmdet/models/layers/se_layer.py @@ -15,17 +15,17 @@ class SELayer(BaseModule): Args: channels (int): The input (and output) channels of the SE layer. ratio (int): Squeeze ratio in SELayer, the intermediate channel will be - ``int(channels/ratio)``. Default: 16. + ``int(channels/ratio)``. Default to 16. conv_cfg (None or dict): Config dict for convolution layer. - Default: None, which means using conv2d. + Default to None, which means using conv2d. act_cfg (dict or Sequence[dict]): Config dict for activation layer. If act_cfg is a dict, two activation layers will be configurated by this dict. If act_cfg is a sequence of dicts, the first activation layer will be configurated by the first dict and the second activation layer will be configurated by the second dict. - Default: (dict(type='ReLU'), dict(type='Sigmoid')) + Default to (dict(type='ReLU'), dict(type='Sigmoid')) init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None + Default to None """ def __init__(self, @@ -75,18 +75,18 @@ class DyReLU(BaseModule): channels (int): The input (and output) channels of DyReLU module. ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module, the intermediate channel will be ``int(channels/ratio)``. - Default: 4. + Default to 4. conv_cfg (None or dict): Config dict for convolution layer. - Default: None, which means using conv2d. + Default to None, which means using conv2d. 
act_cfg (dict or Sequence[dict]): Config dict for activation layer. If act_cfg is a dict, two activation layers will be configurated by this dict. If act_cfg is a sequence of dicts, the first activation layer will be configurated by the first dict and the second activation layer will be configurated by the second dict. - Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, + Default to (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, divisor=6.0)) init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None + Default to None """ def __init__(self, @@ -140,7 +140,7 @@ class ChannelAttention(BaseModule): Args: channels (int): The input (and output) channels of the attention layer. init_cfg (dict or list[dict], optional): Initialization config dict. - Defaults: None + Defaults to None """ def __init__(self, channels: int, init_cfg: OptMultiConfig = None) -> None: From bfc849cfddd48d3b4cf2d8b7412fa4f06373b862 Mon Sep 17 00:00:00 2001 From: NoFish-528 Date: Sun, 1 Jan 2023 11:03:40 +0800 Subject: [PATCH 7/7] [FIX]:add docstrings and change default -> defaults --- mmdet/models/layers/normed_predictor.py | 17 ++++++++++------- mmdet/models/layers/positional_encoding.py | 6 +++--- mmdet/models/layers/res_layer.py | 16 ++++++++-------- mmdet/models/layers/se_layer.py | 18 ++++++++++-------- 4 files changed, 31 insertions(+), 26 deletions(-) diff --git a/mmdet/models/layers/normed_predictor.py b/mmdet/models/layers/normed_predictor.py index 6bf3618b9bf..9fb40c71c42 100644 --- a/mmdet/models/layers/normed_predictor.py +++ b/mmdet/models/layers/normed_predictor.py @@ -14,10 +14,10 @@ class NormedLinear(nn.Linear): """Normalized Linear Layer. Args: - tempeature (float, optional): Tempeature term. Default to 20. - power (int, optional): Power term. Default to 1.0. + tempeature (float, optional): Tempeature term. Defaults to 20. + power (int, optional): Power term. Defaults to 1.0. eps (float, optional): The minimal value of divisor to - keep numerical stability. Default to 1e-6. + keep numerical stability. Defaults to 1e-6. """ def __init__(self, @@ -33,11 +33,13 @@ def __init__(self, self.init_weights() def init_weights(self) -> None: + """Initialize the weights.""" nn.init.normal_(self.weight, mean=0, std=0.01) if self.bias is not None: nn.init.constant_(self.bias, 0) def forward(self, x: Tensor) -> Tensor: + """Forward function for `NormedLinear`.""" weight_ = self.weight / ( self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps) x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) @@ -51,12 +53,12 @@ class NormedConv2d(nn.Conv2d): """Normalized Conv2d Layer. Args: - tempeature (float, optional): Tempeature term. Default to 20. - power (int, optional): Power term. Default to 1.0. + tempeature (float, optional): Tempeature term. Defaults to 20. + power (int, optional): Power term. Defaults to 1.0. eps (float, optional): The minimal value of divisor to - keep numerical stability. Default to 1e-6. + keep numerical stability. Defaults to 1e-6. norm_over_kernel (bool, optional): Normalize over kernel. - Default to False. + Defaults to False. 
""" def __init__(self, @@ -73,6 +75,7 @@ def __init__(self, self.eps = eps def forward(self, x: Tensor) -> Tensor: + """Forward function for `NormedConv2d`.""" if not self.norm_over_kernel: weight_ = self.weight / ( self.weight.norm(dim=1, keepdim=True).pow(self.power) + diff --git a/mmdet/models/layers/positional_encoding.py b/mmdet/models/layers/positional_encoding.py index ff4c5d6076e..9367f0aaf0c 100644 --- a/mmdet/models/layers/positional_encoding.py +++ b/mmdet/models/layers/positional_encoding.py @@ -33,7 +33,7 @@ class SinePositionalEncoding(BaseModule): offset (float): offset add to embed when do the normalization. Defaults to 0. init_cfg (dict or list[dict], optional): Initialization config dict. - Default to None + Defaults to None """ def __init__(self, @@ -115,9 +115,9 @@ class LearnedPositionalEncoding(BaseModule): along x-axis or y-axis. The final returned dimension for each position is 2 times of this value. row_num_embed (int, optional): The dictionary size of row embeddings. - Default to 50. + Defaults to 50. col_num_embed (int, optional): The dictionary size of col embeddings. - Default to 50. + Defaults to 50. init_cfg (dict or list[dict], optional): Initialization config dict. """ diff --git a/mmdet/models/layers/res_layer.py b/mmdet/models/layers/res_layer.py index 1ae8da7e049..ff24d3e8562 100644 --- a/mmdet/models/layers/res_layer.py +++ b/mmdet/models/layers/res_layer.py @@ -17,15 +17,15 @@ class ResLayer(Sequential): inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. - stride (int): stride of the first block. Default to 1 + stride (int): stride of the first block. Defaults to 1 avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default to False + downsampling in the bottleneck. Defaults to False conv_cfg (dict): dictionary to construct and config conv layer. - Default to None + Defaults to None norm_cfg (dict): dictionary to construct and config norm layer. - Default to dict(type='BN') + Defaults to dict(type='BN') downsample_first (bool): Downsample at the first block or last block. - False for Hourglass, True for ResNet. Default to True + False for Hourglass, True for ResNet. Defaults to True """ def __init__(self, @@ -130,8 +130,8 @@ def __init__(self, norm_cfg: ConfigType = dict(type='BN'), dcn: OptConfigType = None, plugins: OptConfigType = None, - init_fg: OptMultiConfig = None) -> None: - super().__init__(init_fg=init_fg) + init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg=init_cfg) assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' assert not with_cp, 'Not implemented yet.' @@ -174,7 +174,7 @@ def norm2(self) -> Optional[BaseModule]: return getattr(self, self.norm2_name) if self.with_norm else None def forward(self, x: Tensor) -> Tensor: - """Forward function.""" + """Forward function for SimplifiedBasicBlock.""" identity = x diff --git a/mmdet/models/layers/se_layer.py b/mmdet/models/layers/se_layer.py index a7f909a79ab..a90a48bd55b 100644 --- a/mmdet/models/layers/se_layer.py +++ b/mmdet/models/layers/se_layer.py @@ -15,17 +15,17 @@ class SELayer(BaseModule): Args: channels (int): The input (and output) channels of the SE layer. ratio (int): Squeeze ratio in SELayer, the intermediate channel will be - ``int(channels/ratio)``. Default to 16. + ``int(channels/ratio)``. Defaults to 16. conv_cfg (None or dict): Config dict for convolution layer. - Default to None, which means using conv2d. 
+ Defaults to None, which means using conv2d. act_cfg (dict or Sequence[dict]): Config dict for activation layer. If act_cfg is a dict, two activation layers will be configurated by this dict. If act_cfg is a sequence of dicts, the first activation layer will be configurated by the first dict and the second activation layer will be configurated by the second dict. - Default to (dict(type='ReLU'), dict(type='Sigmoid')) + Defaults to (dict(type='ReLU'), dict(type='Sigmoid')) init_cfg (dict or list[dict], optional): Initialization config dict. - Default to None + Defaults to None """ def __init__(self, @@ -57,6 +57,7 @@ def __init__(self, act_cfg=act_cfg[1]) def forward(self, x: Tensor) -> Tensor: + """Forward function for SELayer.""" out = self.global_avgpool(x) out = self.conv1(out) out = self.conv2(out) @@ -75,18 +76,18 @@ class DyReLU(BaseModule): channels (int): The input (and output) channels of DyReLU module. ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module, the intermediate channel will be ``int(channels/ratio)``. - Default to 4. + Defaults to 4. conv_cfg (None or dict): Config dict for convolution layer. - Default to None, which means using conv2d. + Defaults to None, which means using conv2d. act_cfg (dict or Sequence[dict]): Config dict for activation layer. If act_cfg is a dict, two activation layers will be configurated by this dict. If act_cfg is a sequence of dicts, the first activation layer will be configurated by the first dict and the second activation layer will be configurated by the second dict. - Default to (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, + Defaults to (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, divisor=6.0)) init_cfg (dict or list[dict], optional): Initialization config dict. - Default to None + Defaults to None """ def __init__(self, @@ -150,6 +151,7 @@ def __init__(self, channels: int, init_cfg: OptMultiConfig = None) -> None: self.act = nn.Hardsigmoid(inplace=True) def forward(self, x: Tensor) -> Tensor: + """Forward function for ChannelAttention.""" out = self.global_avgpool(x) out = self.fc(out) out = self.act(out)
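Taken together, the series leaves NormedLinear.forward fully annotated as Tensor -> Tensor; the computation it performs, visible verbatim in the hunks above, is cosine-similarity logits scaled by a temperature. A self-contained sketch of that computation follows (bias omitted for brevity; the sketch spells temperature conventionally, while the attribute in the patched code is tempearture):

import torch


def normed_linear_forward(x: torch.Tensor,
                          weight: torch.Tensor,
                          temperature: float = 20.0,
                          power: float = 1.0,
                          eps: float = 1e-6) -> torch.Tensor:
    # L2-normalize each row of the (out_features, in_features) weight and
    # each row of the input, then scale the resulting cosine similarities,
    # mirroring NormedLinear.forward from the patch (bias term omitted).
    weight_ = weight / (weight.norm(dim=1, keepdim=True).pow(power) + eps)
    x_ = x / (x.norm(dim=1, keepdim=True).pow(power) + eps)
    return temperature * x_.mm(weight_.t())


logits = normed_linear_forward(torch.randn(4, 8), torch.randn(16, 8))
assert logits.shape == (4, 16)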
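SinePositionalEncoding.forward is only partially visible in these hunks (the docstring, the final cat/permute, and the new mask: Tensor annotation). For context, a compact sketch of the standard DETR-style sine encoding that the class implements, with the normalize/offset branch omitted; the helper name and the simplifications are this sketch's own, but the output shape matches the (B, 2*num_feats, H, W) result the diff returns:

import torch


def sine_positional_encoding(mask: torch.Tensor,
                             num_feats: int = 128,
                             temperature: int = 10000) -> torch.Tensor:
    # mask: (B, H, W) bool, True marking padded pixels, as described in the
    # docstring of SinePositionalEncoding.forward.
    not_mask = ~mask
    y_embed = not_mask.cumsum(1, dtype=torch.float32)  # row position
    x_embed = not_mask.cumsum(2, dtype=torch.float32)  # column position
    dim_t = torch.arange(num_feats, dtype=torch.float32)
    dim_t = temperature ** (2 * (dim_t // 2) / num_feats)
    pos_x = x_embed[:, :, :, None] / dim_t
    pos_y = y_embed[:, :, :, None] / dim_t
    # Interleave sin/cos over the feature dimension, DETR-style.
    pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()),
                        dim=4).flatten(3)
    pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()),
                        dim=4).flatten(3)
    # (B, 2*num_feats, H, W), matching the final cat/permute in the patch.
    return torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)


pos = sine_positional_encoding(torch.zeros(2, 8, 8, dtype=torch.bool))
assert pos.shape == (2, 256, 8, 8)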