diff --git a/configs/cityscapes/README.md b/configs/cityscapes/README.md
index 8785cdb93..b8595f79c 100644
--- a/configs/cityscapes/README.md
+++ b/configs/cityscapes/README.md
@@ -195,10 +195,13 @@ def clip_weights_from_pretrain_of_coco_to_cityscapes(f, out_file):
     print("f: {}\nout_file: {}".format(f, out_file))
     torch.save(m, out_file)
 ```
-Step 3: modify the `input&solver` configuration in the `yaml` file, like this:
+Step 3: modify the `MODEL.WEIGHT`, `INPUT`, and `SOLVER` configuration in the `yaml` file, like this:
 ```
+MODEL:
+  WEIGHT: "xxx.pth"  # the model you saved with the code above
+
 INPUT:
-  MIN_SIZE_TRAIN: (800, 832, 863, 896, 928, 960, 992, 1024, 1024)
+  MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024, 1024)
   MAX_SIZE_TRAIN: 2048
   MIN_SIZE_TEST: 1024
   MAX_SIZE_TEST: 2048
@@ -210,4 +213,5 @@ SOLVER:
   STEPS: (3000,)
   MAX_ITER: 4000
 ```
+Step 4: train the model.
diff --git a/maskrcnn_benchmark/layers/dcn/deform_conv_func.py b/maskrcnn_benchmark/layers/dcn/deform_conv_func.py
index ddc92bb0c..a276a05fe 100644
--- a/maskrcnn_benchmark/layers/dcn/deform_conv_func.py
+++ b/maskrcnn_benchmark/layers/dcn/deform_conv_func.py
@@ -1,5 +1,6 @@
 import torch
 from torch.autograd import Function
+from torch.autograd.function import once_differentiable
 from torch.nn.modules.utils import _pair

 from maskrcnn_benchmark import _C
@@ -67,6 +68,7 @@ def forward(
         return output

     @staticmethod
+    @once_differentiable
     def backward(ctx, grad_output):
         input, offset, weight = ctx.saved_tensors

@@ -201,6 +203,7 @@ def forward(
         return output

     @staticmethod
+    @once_differentiable
     def backward(ctx, grad_output):
         if not grad_output.is_cuda:
             raise NotImplementedError
diff --git a/maskrcnn_benchmark/layers/dcn/deform_pool_func.py b/maskrcnn_benchmark/layers/dcn/deform_pool_func.py
index f18fdd4cb..2f7810b23 100644
--- a/maskrcnn_benchmark/layers/dcn/deform_pool_func.py
+++ b/maskrcnn_benchmark/layers/dcn/deform_pool_func.py
@@ -1,5 +1,6 @@
 import torch
 from torch.autograd import Function
+from torch.autograd.function import once_differentiable

 from maskrcnn_benchmark import _C

@@ -60,6 +61,7 @@ def forward(
         return output

     @staticmethod
+    @once_differentiable
     def backward(ctx, grad_output):
         if not grad_output.is_cuda:
             raise NotImplementedError
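A note on the new `MODEL.WEIGHT` entry in the README hunk: `"xxx.pth"` is a placeholder for the file written by `torch.save(m, out_file)` in the weight-clipping script above. Before training, it can be worth a quick sanity check that the trimmed checkpoint loads; a minimal sketch, assuming the checkpoint is the plain dict that script saves (the path is the placeholder from the config, not a real file):

```python
import torch

# Placeholder path from the yaml snippet above; point it at the file
# written by clip_weights_from_pretrain_of_coco_to_cityscapes.
ckpt = torch.load("xxx.pth", map_location="cpu")

# The script saves whatever dict it loaded and modified, so listing a
# few keys is a cheap way to eyeball what survived the clipping.
print(sorted(ckpt.keys())[:5])
```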
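On the `once_differentiable` additions: decorating a custom `Function.backward` with `torch.autograd.function.once_differentiable` runs that backward with gradient tracking disabled. This fits the DCN layers, whose backward passes call into the `_C` extension that autograd cannot trace anyway; with the decorator, a double-backward attempt raises a clear error instead of silently producing wrong second-order gradients. A minimal sketch of the behavior with a toy function (the `Square` class is illustrative only, not part of this patch):

```python
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable


class Square(Function):
    """Toy y = x**2 with a hand-written, once-differentiable backward."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x * x

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        # Runs under no_grad: nothing here is recorded on the autograd tape.
        (x,) = ctx.saved_tensors
        return grad_output * 2 * x


x = torch.randn(3, requires_grad=True)
y = Square.apply(x).sum()
(g,) = torch.autograd.grad(y, x, create_graph=True)  # first-order grad is fine
# g.sum().backward()  # raises: this backward was marked @once_differentiable
```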