diff --git a/mmdet/models/layers/transformer.py b/mmdet/models/layers/transformer.py
index 26a29c757bd..dfd9fb5d075 100644
--- a/mmdet/models/layers/transformer.py
+++ b/mmdet/models/layers/transformer.py
@@ -391,7 +391,7 @@ def inverse_sigmoid(x, eps=1e-5):
 class DetrTransformerEncoder(BaseModule):
 
-    def __init__(self, layer_cfg=None, num_layers=None, init_cfg=None):
+    def __init__(self, layer_cfg=None, num_layers=None, init_cfg=None, **kwargs):
         super().__init__(init_cfg=init_cfg)
         if isinstance(layer_cfg, dict):
@@ -423,7 +423,8 @@ def __init__(self,
                  num_layers=None,
                  post_norm_cfg=dict(type='LN'),
                  return_intermediate=True,
-                 init_cfg=None):
+                 init_cfg=None,
+                 **kwargs):
         super().__init__(init_cfg=init_cfg)
         if isinstance(layer_cfg, dict):
             layer_cfg = [copy.deepcopy(layer_cfg) for _ in range(num_layers)]
@@ -475,7 +476,8 @@ def __init__(self,
                      act_cfg=dict(type='ReLU', inplace=True)),
                  norm_cfg=dict(type='LN'),
                  init_cfg=None,
-                 batch_first=False):
+                 batch_first=False,
+                 **kwargs):
         super().__init__(init_cfg=init_cfg)
         if 'batch_first' in self_attn_cfg:
             # TODO
@@ -535,7 +537,8 @@ def __init__(self,
                  ),
                  norm_cfg=dict(type='LN'),
                  init_cfg=None,
-                 batch_first=False):
+                 batch_first=False,
+                 **kwargs):
         super().__init__(init_cfg=init_cfg)
 
         for attn_cfg in (self_attn_cfg, cross_attn_cfg):
@@ -635,7 +638,8 @@ def __init__(self,
                  with_proj=True,
                  act_cfg=dict(type='ReLU', inplace=True),
                  norm_cfg=dict(type='LN'),
-                 init_cfg=None):
+                 init_cfg=None,
+                 **kwargs):
         super(DynamicConv, self).__init__(init_cfg)
         self.in_channels = in_channels
         self.feat_channels = feat_channels
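A minimal sketch of what the added `**kwargs` buys, assuming the import path from the file touched above; the `layer_cfg` values are illustrative placeholders and the `extra_option` key is hypothetical. The change lets each constructor absorb config keys it does not consume, so building a module from a config dict that carries extra entries no longer fails:

```python
from mmdet.models.layers.transformer import DetrTransformerEncoder

# Illustrative config. 'extra_option' stands in for any key that a
# registry-driven builder might forward but that the encoder itself
# never reads; the layer_cfg values are placeholders.
encoder_cfg = dict(
    layer_cfg=dict(
        self_attn_cfg=dict(embed_dims=256, num_heads=8, batch_first=True),
        ffn_cfg=dict(embed_dims=256, feedforward_channels=1024)),
    num_layers=6,
    extra_option=True)  # hypothetical unconsumed key

# Before this diff, the stray key raised:
#   TypeError: __init__() got an unexpected keyword argument 'extra_option'
# With **kwargs in the signature, it is silently absorbed instead:
encoder = DetrTransformerEncoder(**encoder_cfg)
```

The trade-off of this pattern is that misspelled or obsolete keys are now swallowed silently rather than surfacing as a `TypeError`, which previously acted as a free sanity check on the config.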