CodeCamp #33 [Fix]: Add type hints for res_layer, se_layer, normed_predictor, positional_encoding #9346

Merged · 7 commits · Jan 3, 2023
45 changes: 27 additions & 18 deletions mmdet/models/layers/normed_predictor.py
@@ -2,6 +2,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+from torch import Tensor
 
 from mmdet.registry import MODELS
 
@@ -13,25 +14,32 @@ class NormedLinear(nn.Linear):
"""Normalized Linear Layer.

Args:
tempeature (float, optional): Tempeature term. Default to 20.
power (int, optional): Power term. Default to 1.0.
tempeature (float, optional): Tempeature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
keep numerical stability. Defaults to 1e-6.
"""

def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs):
super(NormedLinear, self).__init__(*args, **kwargs)
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()

def init_weights(self):
def init_weights(self) -> None:
ZwwWayne marked this conversation as resolved.
Show resolved Hide resolved
"""Initialize the weights."""
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)

def forward(self, x):
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedLinear`."""
weight_ = self.weight / (
ZwwWayne marked this conversation as resolved.
Show resolved Hide resolved
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
@@ -45,28 +53,29 @@ class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.

Args:
tempeature (float, optional): Tempeature term. Default to 20.
power (int, optional): Power term. Default to 1.0.
tempeature (float, optional): Tempeature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
keep numerical stability. Defaults to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Default to False.
Defaults to False.
"""

ZwwWayne marked this conversation as resolved.
Show resolved Hide resolved
def __init__(self,
*args,
tempearture=20,
power=1.0,
eps=1e-6,
norm_over_kernel=False,
**kwargs):
super(NormedConv2d, self).__init__(*args, **kwargs)
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
norm_over_kernel: bool = False,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps

def forward(self, x):
def forward(self, x: Tensor) -> Tensor:
ZwwWayne marked this conversation as resolved.
Show resolved Hide resolved
"""Forward function for `NormedConv2d`."""
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
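To make the annotated signatures concrete, here is a minimal usage sketch (not part of the PR; the shapes and the direct-module import path are illustrative assumptions based on the file locations above):

    import torch

    from mmdet.models.layers.normed_predictor import NormedConv2d, NormedLinear

    # Both layers L2-normalize the weight (and input) before the usual op,
    # then scale the result by `tempearture` (the upstream spelling).
    linear = NormedLinear(256, 80)               # in_features=256, out_features=80
    conv = NormedConv2d(256, 80, kernel_size=1)

    x = torch.rand(4, 256)                       # matches `x: Tensor` in forward()
    feat = torch.rand(4, 256, 7, 7)
    print(linear(x).shape)                       # torch.Size([4, 80])
    print(conv(feat).shape)                      # torch.Size([4, 80, 7, 7])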
46 changes: 25 additions & 21 deletions mmdet/models/layers/positional_encoding.py
@@ -4,8 +4,10 @@
 import torch
 import torch.nn as nn
 from mmengine.model import BaseModule
+from torch import Tensor
 
 from mmdet.registry import MODELS
+from mmdet.utils import MultiConfig, OptMultiConfig
 
 
 @MODELS.register_module()
@@ -31,18 +33,18 @@ class SinePositionalEncoding(BaseModule):
         offset (float): offset add to embed when do the normalization.
             Defaults to 0.
         init_cfg (dict or list[dict], optional): Initialization config dict.
-            Default: None
+            Defaults to None
     """
 
     def __init__(self,
-                 num_feats,
-                 temperature=10000,
-                 normalize=False,
-                 scale=2 * math.pi,
-                 eps=1e-6,
-                 offset=0.,
-                 init_cfg=None):
-        super(SinePositionalEncoding, self).__init__(init_cfg)
+                 num_feats: int,
+                 temperature: int = 10000,
+                 normalize: bool = False,
+                 scale: float = 2 * math.pi,
+                 eps: float = 1e-6,
+                 offset: float = 0.,
+                 init_cfg: OptMultiConfig = None) -> None:
+        super().__init__(init_cfg=init_cfg)
         if normalize:
             assert isinstance(scale, (float, int)), 'when normalize is set,' \
                 'scale should be provided and in float or int type, ' \
@@ -54,7 +56,7 @@ def __init__(self,
         self.eps = eps
         self.offset = offset
 
-    def forward(self, mask):
+    def forward(self, mask: Tensor) -> Tensor:
         """Forward function for `SinePositionalEncoding`.
 
         Args:
@@ -93,7 +95,7 @@ def forward(self, mask):
         pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
         return pos
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         """str: a string that describes the module"""
         repr_str = self.__class__.__name__
         repr_str += f'(num_feats={self.num_feats}, '
@@ -113,25 +115,27 @@ class LearnedPositionalEncoding(BaseModule):
             along x-axis or y-axis. The final returned dimension for
             each position is 2 times of this value.
         row_num_embed (int, optional): The dictionary size of row embeddings.
-            Default 50.
+            Defaults to 50.
         col_num_embed (int, optional): The dictionary size of col embeddings.
-            Default 50.
+            Defaults to 50.
         init_cfg (dict or list[dict], optional): Initialization config dict.
     """
 
-    def __init__(self,
-                 num_feats,
-                 row_num_embed=50,
-                 col_num_embed=50,
-                 init_cfg=dict(type='Uniform', layer='Embedding')):
-        super(LearnedPositionalEncoding, self).__init__(init_cfg)
+    def __init__(
+        self,
+        num_feats: int,
+        row_num_embed: int = 50,
+        col_num_embed: int = 50,
+        init_cfg: MultiConfig = dict(type='Uniform', layer='Embedding')
+    ) -> None:
+        super().__init__(init_cfg=init_cfg)
         self.row_embed = nn.Embedding(row_num_embed, num_feats)
         self.col_embed = nn.Embedding(col_num_embed, num_feats)
         self.num_feats = num_feats
         self.row_num_embed = row_num_embed
         self.col_num_embed = col_num_embed
 
-    def forward(self, mask):
+    def forward(self, mask: Tensor) -> Tensor:
         """Forward function for `LearnedPositionalEncoding`.
 
         Args:
@@ -155,7 +159,7 @@ def forward(self, mask):
                             1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)
         return pos

-    def __repr__(self):
+    def __repr__(self) -> str:
         """str: a string that describes the module"""
         repr_str = self.__class__.__name__
         repr_str += f'(num_feats={self.num_feats}, '
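For context, a quick sketch of how the newly typed `forward(mask: Tensor) -> Tensor` entry points are called (illustrative only; the shapes, dtype, and import path are assumptions):

    import torch

    from mmdet.models.layers.positional_encoding import (
        LearnedPositionalEncoding, SinePositionalEncoding)

    # mask: (batch, h, w); nonzero marks padded pixels. An int mask avoids
    # bool-subtraction issues in some torch versions.
    mask = torch.zeros(2, 32, 32, dtype=torch.int)

    sine = SinePositionalEncoding(num_feats=128, normalize=True)
    learned = LearnedPositionalEncoding(num_feats=128)  # h, w must be <= 50

    # Both return (batch, 2 * num_feats, h, w).
    print(sine(mask).shape)     # torch.Size([2, 256, 32, 32])
    print(learned(mask).shape)  # torch.Size([2, 256, 32, 32])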
71 changes: 38 additions & 33 deletions mmdet/models/layers/res_layer.py
@@ -1,8 +1,13 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+from typing import Optional
 
 from mmcv.cnn import build_conv_layer, build_norm_layer
 from mmengine.model import BaseModule, Sequential
+from torch import Tensor
 from torch import nn as nn
 
+from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
 
 
 class ResLayer(Sequential):
     """ResLayer to build ResNet style backbone.
@@ -12,28 +17,28 @@ class ResLayer(Sequential):
         inplanes (int): inplanes of block.
         planes (int): planes of block.
         num_blocks (int): number of blocks.
-        stride (int): stride of the first block. Default: 1
+        stride (int): stride of the first block. Defaults to 1
         avg_down (bool): Use AvgPool instead of stride conv when
-            downsampling in the bottleneck. Default: False
+            downsampling in the bottleneck. Defaults to False
         conv_cfg (dict): dictionary to construct and config conv layer.
-            Default: None
+            Defaults to None
         norm_cfg (dict): dictionary to construct and config norm layer.
-            Default: dict(type='BN')
+            Defaults to dict(type='BN')
         downsample_first (bool): Downsample at the first block or last block.
-            False for Hourglass, True for ResNet. Default: True
+            False for Hourglass, True for ResNet. Defaults to True
     """
 
     def __init__(self,
-                 block,
-                 inplanes,
-                 planes,
-                 num_blocks,
-                 stride=1,
-                 avg_down=False,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='BN'),
-                 downsample_first=True,
-                 **kwargs):
+                 block: BaseModule,
+                 inplanes: int,
+                 planes: int,
+                 num_blocks: int,
+                 stride: int = 1,
+                 avg_down: bool = False,
+                 conv_cfg: OptConfigType = None,
+                 norm_cfg: ConfigType = dict(type='BN'),
+                 downsample_first: bool = True,
+                 **kwargs) -> None:
         self.block = block
 
         downsample = None
@@ -101,7 +106,7 @@ def __init__(self,
                     conv_cfg=conv_cfg,
                     norm_cfg=norm_cfg,
                     **kwargs))
-        super(ResLayer, self).__init__(*layers)
+        super().__init__(*layers)
 
 
 class SimplifiedBasicBlock(BaseModule):
@@ -114,19 +119,19 @@ class SimplifiedBasicBlock(BaseModule):
     expansion = 1
 
     def __init__(self,
-                 inplanes,
-                 planes,
-                 stride=1,
-                 dilation=1,
-                 downsample=None,
-                 style='pytorch',
-                 with_cp=False,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='BN'),
-                 dcn=None,
-                 plugins=None,
-                 init_fg=None):
-        super(SimplifiedBasicBlock, self).__init__(init_fg)
+                 inplanes: int,
+                 planes: int,
+                 stride: int = 1,
+                 dilation: int = 1,
+                 downsample: Optional[Sequential] = None,
+                 style: str = 'pytorch',
+                 with_cp: bool = False,
+                 conv_cfg: OptConfigType = None,
+                 norm_cfg: ConfigType = dict(type='BN'),
+                 dcn: OptConfigType = None,
+                 plugins: OptConfigType = None,
+                 init_cfg: OptMultiConfig = None) -> None:
+        super().__init__(init_cfg=init_cfg)
         assert dcn is None, 'Not implemented yet.'
         assert plugins is None, 'Not implemented yet.'
         assert not with_cp, 'Not implemented yet.'
@@ -159,17 +164,17 @@ def __init__(self,
         self.with_cp = with_cp
 
     @property
-    def norm1(self):
+    def norm1(self) -> Optional[BaseModule]:
         """nn.Module: normalization layer after the first convolution layer"""
         return getattr(self, self.norm1_name) if self.with_norm else None
 
     @property
-    def norm2(self):
+    def norm2(self) -> Optional[BaseModule]:
         """nn.Module: normalization layer after the second convolution layer"""
         return getattr(self, self.norm2_name) if self.with_norm else None
 
-    def forward(self, x):
-        """Forward function."""
+    def forward(self, x: Tensor) -> Tensor:
+        """Forward function for SimplifiedBasicBlock."""
 
         identity = x
 
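A small construction sketch for the updated `ResLayer` signature (illustrative; `BasicBlock` comes from the ResNet backbone module, not from this file, and the import paths are assumptions):

    import torch

    from mmdet.models.backbones.resnet import BasicBlock
    from mmdet.models.layers import ResLayer

    # Stack two BasicBlocks; the first one downsamples with stride=2.
    res_layer = ResLayer(
        block=BasicBlock, inplanes=64, planes=64, num_blocks=2, stride=2)
    x = torch.rand(1, 64, 56, 56)
    print(res_layer(x).shape)  # torch.Size([1, 64, 28, 28])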
20 changes: 11 additions & 9 deletions mmdet/models/layers/se_layer.py
@@ -15,17 +15,17 @@ class SELayer(BaseModule):
     Args:
         channels (int): The input (and output) channels of the SE layer.
         ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
-            ``int(channels/ratio)``. Default: 16.
+            ``int(channels/ratio)``. Defaults to 16.
         conv_cfg (None or dict): Config dict for convolution layer.
-            Default: None, which means using conv2d.
+            Defaults to None, which means using conv2d.
         act_cfg (dict or Sequence[dict]): Config dict for activation layer.
             If act_cfg is a dict, two activation layers will be configurated
             by this dict. If act_cfg is a sequence of dicts, the first
             activation layer will be configurated by the first dict and the
             second activation layer will be configurated by the second dict.
-            Default: (dict(type='ReLU'), dict(type='Sigmoid'))
+            Defaults to (dict(type='ReLU'), dict(type='Sigmoid'))
         init_cfg (dict or list[dict], optional): Initialization config dict.
-            Default: None
+            Defaults to None
     """
 
     def __init__(self,
@@ -57,6 +57,7 @@ def __init__(self,
             act_cfg=act_cfg[1])
 
     def forward(self, x: Tensor) -> Tensor:
+        """Forward function for SELayer."""
         out = self.global_avgpool(x)
         out = self.conv1(out)
         out = self.conv2(out)
@@ -75,18 +76,18 @@ class DyReLU(BaseModule):
         channels (int): The input (and output) channels of DyReLU module.
         ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module,
             the intermediate channel will be ``int(channels/ratio)``.
-            Default: 4.
+            Defaults to 4.
         conv_cfg (None or dict): Config dict for convolution layer.
-            Default: None, which means using conv2d.
+            Defaults to None, which means using conv2d.
         act_cfg (dict or Sequence[dict]): Config dict for activation layer.
             If act_cfg is a dict, two activation layers will be configurated
             by this dict. If act_cfg is a sequence of dicts, the first
             activation layer will be configurated by the first dict and the
             second activation layer will be configurated by the second dict.
-            Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
+            Defaults to (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
                 divisor=6.0))
         init_cfg (dict or list[dict], optional): Initialization config dict.
-            Default: None
+            Defaults to None
     """
 
     def __init__(self,
@@ -140,7 +141,7 @@ class ChannelAttention(BaseModule):
     Args:
         channels (int): The input (and output) channels of the attention layer.
         init_cfg (dict or list[dict], optional): Initialization config dict.
-            Defaults to None.
+            Defaults to None
     """
 
     def __init__(self, channels: int, init_cfg: OptMultiConfig = None) -> None:
@@ -150,6 +151,7 @@ def __init__(self, channels: int, init_cfg: OptMultiConfig = None) -> None:
         self.act = nn.Hardsigmoid(inplace=True)
 
     def forward(self, x: Tensor) -> Tensor:
+        """Forward function for ChannelAttention."""
         out = self.global_avgpool(x)
         out = self.fc(out)
         out = self.act(out)
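And a matching sketch for the SE-style layers above (illustrative shapes; the `mmdet.models.layers` exports are assumptions). Both modules rescale channels and preserve the input shape:

    import torch

    from mmdet.models.layers import ChannelAttention, SELayer

    x = torch.rand(2, 64, 32, 32)
    se = SELayer(channels=64, ratio=16)  # intermediate channels: int(64 / 16) = 4
    ca = ChannelAttention(channels=64)

    print(se(x).shape)  # torch.Size([2, 64, 32, 32])
    print(ca(x).shape)  # torch.Size([2, 64, 32, 32])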