CodeCamp #33 [Fix]: Add type hints for res_layer, se_layer, normed_predictor, positional_encoding #9346

Merged 7 commits on Jan 3, 2023
Changes from 5 commits
30 changes: 18 additions & 12 deletions mmdet/models/layers/normed_predictor.py
@@ -2,6 +2,7 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
+from torch import Tensor

from mmdet.registry import MODELS

@@ -19,19 +20,24 @@ class NormedLinear(nn.Linear):
            keep numerical stability. Default to 1e-6.
    """

-    def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs):
-        super(NormedLinear, self).__init__(*args, **kwargs)
+    def __init__(self,
+                 *args,
+                 tempearture: float = 20,
+                 power: float = 1.0,
+                 eps: float = 1e-6,
+                 **kwargs) -> None:
+        super().__init__(*args, **kwargs)
        self.tempearture = tempearture
        self.power = power
        self.eps = eps
        self.init_weights()

-    def init_weights(self):
+    def init_weights(self) -> None:
        nn.init.normal_(self.weight, mean=0, std=0.01)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0)

-    def forward(self, x):
+    def forward(self, x) -> Tensor:
        weight_ = self.weight / (
            self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
        x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
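Aside (not part of the diff): a standalone sketch of the normalization that NormedLinear.forward applies, in plain PyTorch. The temperature scaling and the final F.linear call sit in lines elided above, so those two steps are an assumption about the hidden code, not a quote of it.

import torch
import torch.nn.functional as F

power, eps, tempearture = 1.0, 1e-6, 20  # defaults from the signature above
weight = torch.randn(80, 256)            # (out_features, in_features)
x = torch.randn(4, 256)                  # (batch, in_features)

# Row-normalize the weight and the input features, as in the hunk above.
weight_ = weight / (weight.norm(dim=1, keepdim=True).pow(power) + eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(power) + eps)
# Assumed continuation: scale by temperature, then apply the linear map.
out = F.linear(x_ * tempearture, weight_)
print(out.shape)  # torch.Size([4, 80])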
@@ -50,23 +56,23 @@ class NormedConv2d(nn.Conv2d):
        eps (float, optional): The minimal value of divisor to
            keep numerical stability. Default to 1e-6.
        norm_over_kernel (bool, optional): Normalize over kernel.
-            Default to False.
+            Default: False.

Review comment (Collaborator): Default: -> Defaults to

    """

    def __init__(self,
                 *args,
-                 tempearture=20,
-                 power=1.0,
-                 eps=1e-6,
-                 norm_over_kernel=False,
-                 **kwargs):
-        super(NormedConv2d, self).__init__(*args, **kwargs)
+                 tempearture: float = 20,
+                 power: float = 1.0,
+                 eps: float = 1e-6,
+                 norm_over_kernel: bool = False,
+                 **kwargs) -> None:
+        super().__init__(*args, **kwargs)
        self.tempearture = tempearture
        self.power = power
        self.norm_over_kernel = norm_over_kernel
        self.eps = eps

-    def forward(self, x):
+    def forward(self, x) -> Tensor:
        if not self.norm_over_kernel:
            weight_ = self.weight / (
                self.weight.norm(dim=1, keepdim=True).pow(self.power) +
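For context, a hypothetical drop-in usage of NormedConv2d with the newly typed keywords; the module path is taken from the diff header and may differ across mmdet versions.

import torch
from mmdet.models.layers.normed_predictor import NormedConv2d

# Accepts the usual nn.Conv2d arguments plus the typed extras above.
conv = NormedConv2d(256, 80, kernel_size=3, padding=1, norm_over_kernel=False)
out = conv(torch.randn(2, 256, 20, 20))
print(out.shape)  # torch.Size([2, 80, 20, 20])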
40 changes: 22 additions & 18 deletions mmdet/models/layers/positional_encoding.py
@@ -4,8 +4,10 @@
import torch
import torch.nn as nn
from mmengine.model import BaseModule
+from torch import Tensor

from mmdet.registry import MODELS
+from mmdet.utils import MultiConfig, OptMultiConfig


@MODELS.register_module()
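The annotations in this file lean on mmdet's config type aliases. Roughly, paraphrasing mmdet/utils rather than quoting it:

from typing import List, Optional, Union

from mmengine.config import ConfigDict

ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]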
@@ -35,14 +37,14 @@ class SinePositionalEncoding(BaseModule):
"""

def __init__(self,
num_feats,
temperature=10000,
normalize=False,
scale=2 * math.pi,
eps=1e-6,
offset=0.,
init_cfg=None):
super(SinePositionalEncoding, self).__init__(init_cfg)
num_feats: int,
temperature: int = 10000,
normalize: bool = False,
scale: float = 2 * math.pi,
eps: float = 1e-6,
offset: float = 0.,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

init_cfg=init_cfg

if normalize:
assert isinstance(scale, (float, int)), 'when normalize is set,' \
'scale should be provided and in float or int type, ' \
@@ -54,7 +56,7 @@ def __init__(self,
        self.eps = eps
        self.offset = offset

-    def forward(self, mask):
+    def forward(self, mask) -> Tensor:

Review comment (Collaborator): the type of mask

        """Forward function for `SinePositionalEncoding`.

        Args:
@@ -93,7 +95,7 @@ def forward(self, mask):
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos

-    def __repr__(self):
+    def __repr__(self) -> str:
        """str: a string that describes the module"""
        repr_str = self.__class__.__name__
        repr_str += f'(num_feats={self.num_feats}, '
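A small usage sketch for SinePositionalEncoding; shapes follow the class docstring, and the all-zeros mask (no padded pixels) is illustrative only.

import torch
from mmdet.models.layers.positional_encoding import SinePositionalEncoding

pos_enc = SinePositionalEncoding(num_feats=128, normalize=True)
mask = torch.zeros(1, 32, 32, dtype=torch.bool)  # (B, H, W), non-zero = padding
pos = pos_enc(mask)
print(pos.shape)  # torch.Size([1, 256, 32, 32]): 2 * num_feats channels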
@@ -119,19 +121,21 @@ class LearnedPositionalEncoding(BaseModule):
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

-    def __init__(self,
-                 num_feats,
-                 row_num_embed=50,
-                 col_num_embed=50,
-                 init_cfg=dict(type='Uniform', layer='Embedding')):
-        super(LearnedPositionalEncoding, self).__init__(init_cfg)
+    def __init__(
+        self,
+        num_feats: int,
+        row_num_embed: int = 50,
+        col_num_embed: int = 50,
+        init_cfg: MultiConfig = dict(type='Uniform', layer='Embedding')
+    ) -> None:
+        super().__init__(init_cfg)

Review comment (Collaborator): init_cfg=init_cfg

        self.row_embed = nn.Embedding(row_num_embed, num_feats)
        self.col_embed = nn.Embedding(col_num_embed, num_feats)
        self.num_feats = num_feats
        self.row_num_embed = row_num_embed
        self.col_num_embed = col_num_embed

-    def forward(self, mask):
+    def forward(self, mask) -> Tensor:
        """Forward function for `LearnedPositionalEncoding`.

        Args:
@@ -155,7 +159,7 @@ def forward(self, mask):
                           1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)
        return pos

-    def __repr__(self):
+    def __repr__(self) -> str:
        """str: a string that describes the module"""
        repr_str = self.__class__.__name__
        repr_str += f'(num_feats={self.num_feats}, '
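LearnedPositionalEncoding can be exercised the same way, assuming H and W stay within row_num_embed and col_num_embed (both default to 50):

import torch
from mmdet.models.layers.positional_encoding import LearnedPositionalEncoding

pos_enc = LearnedPositionalEncoding(num_feats=128)
pos = pos_enc(torch.zeros(1, 32, 32, dtype=torch.bool))
print(pos.shape)  # torch.Size([1, 256, 32, 32])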
59 changes: 32 additions & 27 deletions mmdet/models/layers/res_layer.py
@@ -1,8 +1,13 @@
# Copyright (c) OpenMMLab. All rights reserved.
+from typing import Optional

from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule, Sequential
+from torch import Tensor
from torch import nn as nn

+from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig


class ResLayer(Sequential):
    """ResLayer to build ResNet style backbone.
@@ -24,16 +29,16 @@ class ResLayer(Sequential):
"""

def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
downsample_first=True,
**kwargs):
block: BaseModule,
inplanes: int,
planes: int,
num_blocks: int,
stride: int = 1,
avg_down: bool = False,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
downsample_first: bool = True,
**kwargs) -> None:
self.block = block

downsample = None
@@ -101,7 +106,7 @@ def __init__(self,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                **kwargs))
-        super(ResLayer, self).__init__(*layers)
+        super().__init__(*layers)
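A hypothetical use of the now-typed ResLayer signature, stacking two BasicBlocks from mmdet's ResNet; channel counts are chosen so no downsample branch is built.

import torch
from mmdet.models.backbones.resnet import BasicBlock
from mmdet.models.layers.res_layer import ResLayer

# 64 -> 64 channels at stride 1 mirrors a plain ResNet stage.
layer = ResLayer(block=BasicBlock, inplanes=64, planes=64, num_blocks=2)
out = layer(torch.randn(2, 64, 56, 56))
print(out.shape)  # torch.Size([2, 64, 56, 56])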


class SimplifiedBasicBlock(BaseModule):
@@ -114,19 +119,19 @@ class SimplifiedBasicBlock(BaseModule):
    expansion = 1

Review comment (Collaborator): Line 109: super().init(*layers)

    def __init__(self,
-                 inplanes,
-                 planes,
-                 stride=1,
-                 dilation=1,
-                 downsample=None,
-                 style='pytorch',
-                 with_cp=False,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='BN'),
-                 dcn=None,
-                 plugins=None,
-                 init_fg=None):
-        super(SimplifiedBasicBlock, self).__init__(init_fg)
+                 inplanes: int,
+                 planes: int,
+                 stride: int = 1,
+                 dilation: int = 1,
+                 downsample: Optional[Sequential] = None,
+                 style: ConfigType = 'pytorch',
+                 with_cp: bool = False,
+                 conv_cfg: OptConfigType = None,
+                 norm_cfg: ConfigType = dict(type='BN'),
+                 dcn: OptConfigType = None,
+                 plugins: OptConfigType = None,
+                 init_cfg: OptMultiConfig = None) -> None:
+        super().__init__(init_cfg=init_cfg)
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'
        assert not with_cp, 'Not implemented yet.'
@@ -159,16 +164,16 @@ def __init__(self,
        self.with_cp = with_cp

    @property
-    def norm1(self):
+    def norm1(self) -> Optional[BaseModule]:
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name) if self.with_norm else None

    @property
-    def norm2(self):
+    def norm2(self) -> Optional[BaseModule]:
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name) if self.with_norm else None

-    def forward(self, x):
+    def forward(self, x: Tensor) -> Tensor:
        """Forward function."""

        identity = x
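A sketch for SimplifiedBasicBlock under default settings; inplanes must equal planes * expansion (1 here) so the residual addition is shape-compatible.

import torch
from mmdet.models.layers.res_layer import SimplifiedBasicBlock

block = SimplifiedBasicBlock(inplanes=64, planes=64)
out = block(torch.randn(2, 64, 56, 56))
print(out.shape)  # torch.Size([2, 64, 56, 56])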
2 changes: 1 addition & 1 deletion mmdet/models/layers/se_layer.py
@@ -140,7 +140,7 @@ class ChannelAttention(BaseModule):
    Args:
        channels (int): The input (and output) channels of the attention layer.
        init_cfg (dict or list[dict], optional): Initialization config dict.
-            Defaults to None.
+            Defaults: None

Review comment (Collaborator): Defaults to None

    """

    def __init__(self, channels: int, init_cfg: OptMultiConfig = None) -> None:
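Finally, a usage sketch for ChannelAttention; it re-weights channels, so the output shape matches the input shape.

import torch
from mmdet.models.layers.se_layer import ChannelAttention

attn = ChannelAttention(channels=256)
out = attn(torch.randn(2, 256, 7, 7))
print(out.shape)  # torch.Size([2, 256, 7, 7])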