From e651d060116636ac85fbfe748162c4943b0e604e Mon Sep 17 00:00:00 2001
From: Walter Hugo Lopez Pinaya
Date: Fri, 17 Feb 2023 09:45:40 +0000
Subject: [PATCH] Change and reorder the default values and reorder docstring

Signed-off-by: Walter Hugo Lopez Pinaya
---
 .../networks/nets/patchgan_discriminator.py | 33 ++++++++++---------
 1 file changed, 17 insertions(+), 16 deletions(-)

diff --git a/generative/networks/nets/patchgan_discriminator.py b/generative/networks/nets/patchgan_discriminator.py
index 5648596b..b5a98c88 100644
--- a/generative/networks/nets/patchgan_discriminator.py
+++ b/generative/networks/nets/patchgan_discriminator.py
@@ -16,6 +16,7 @@ import torch
 import torch.nn as nn
 
 from monai.networks.blocks import Convolution
+from monai.networks.layers import Act
 
 
 class MultiScalePatchDiscriminator(nn.Sequential):
@@ -31,8 +32,8 @@ class MultiScalePatchDiscriminator(nn.Sequential):
     Args:
         num_d: number of discriminators
         num_layers_d: number of Convolution layers (Conv + activation + normalisation + [dropout]) in each
-        of the discriminators. In each layer, the number of channels are doubled and the spatial size is
-        divided by 2.
+            of the discriminators. In each layer, the number of channels are doubled and the spatial size is
+            divided by 2.
         spatial_dims: number of spatial dimensions (1D, 2D etc.)
         num_channels: number of filters in the first convolutional layer (double of the value is taken from then on)
         in_channels: number of input channels
@@ -43,7 +44,7 @@ class MultiScalePatchDiscriminator(nn.Sequential):
         bias: introduction of layer bias
         dropout: proportion of dropout applied, defaults to 0.
         minimum_size_im: minimum spatial size of the input image. Introduced to make sure the architecture
-        requested isn't going to downsample the input image beyond value of 1.
+            requested isn't going to downsample the input image beyond value of 1.
         last_conv_kernel_size: kernel size of the last convolutional layer.
     """
 
@@ -54,10 +55,10 @@ def __init__(
         spatial_dims: int,
         num_channels: int,
         in_channels: int,
-        out_channels: int,
-        kernel_size: int,
-        activation: str | tuple = "PRELU",
-        norm: str | tuple = "INSTANCE",
+        out_channels: int = 1,
+        kernel_size: int = 4,
+        activation: str | tuple = (Act.LEAKYRELU, {"negative_slope": 0.2}),
+        norm: str | tuple = "BATCH",
         bias: bool = False,
         dropout: float | tuple = 0.0,
         minimum_size_im: int = 256,
@@ -77,11 +78,11 @@ def __init__(
                     "Please reduce num_layers, reduce num_D or enter bigger images." % (i_, num_layers_d_i)
                 )
             subnet_d = PatchDiscriminator(
-                num_layers_d_i,
                 spatial_dims=spatial_dims,
                 num_channels=self.num_channels,
                 in_channels=in_channels,
                 out_channels=out_channels,
+                num_layers_d=num_layers_d_i,
                 kernel_size=kernel_size,
                 activation=activation,
                 norm=norm,
@@ -122,13 +123,13 @@ class PatchDiscriminator(nn.Sequential):
     In CVPR 2018.
 
     Args:
-        num_layers_d: number of Convolution layers (Conv + activation + normalisation + [dropout]) in each
-        of the discriminators. In each layer, the number of channels are doubled and the spatial size is
-        divided by 2.
         spatial_dims: number of spatial dimensions (1D, 2D etc.)
         num_channels: number of filters in the first convolutional layer (double of the value is taken from then on)
         in_channels: number of input channels
         out_channels: number of output channels in each discriminator
+        num_layers_d: number of Convolution layers (Conv + activation + normalisation + [dropout]) in each
+            of the discriminators. In each layer, the number of channels are doubled and the spatial size is
+            divided by 2.
         kernel_size: kernel size of the convolution layers
         activation: activation layer type
         norm: normalisation type
@@ -140,14 +141,14 @@ class PatchDiscriminator(nn.Sequential):
 
     def __init__(
         self,
-        num_layers_d: int,
         spatial_dims: int,
         num_channels: int,
         in_channels: int,
-        out_channels: int,
-        kernel_size: int,
-        activation: str | tuple = "PRELU",
-        norm: str | tuple = "INSTANCE",
+        out_channels: int = 1,
+        num_layers_d: int = 3,
+        kernel_size: int = 4,
+        activation: str | tuple = (Act.LEAKYRELU, {"negative_slope": 0.2}),
+        norm: str | tuple = "BATCH",
         bias: bool = False,
         padding: int | Sequence[int] = 1,
         dropout: float | tuple = 0.0,
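Usage sketch (not part of the patch): with the new defaults, only the structural arguments have to be
passed; out_channels, kernel_size, activation and norm fall back to 1, 4, LeakyReLU(negative_slope=0.2)
and batch normalisation. The import path assumes the package is installed as `generative`, matching the
file path in the diff above; the sizes and channel counts below are illustrative only.

import torch

from generative.networks.nets.patchgan_discriminator import MultiScalePatchDiscriminator

# Only the arguments without defaults are given; the changed defaults cover the rest.
discriminator = MultiScalePatchDiscriminator(
    num_d=2,
    num_layers_d=3,
    spatial_dims=2,
    num_channels=64,
    in_channels=1,
)

image = torch.randn(1, 1, 256, 256)  # one single-channel 256x256 image
prediction = discriminator(image)  # per-discriminator outputs (with intermediate features)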