
Commit

reformatted code
Signed-off-by: Tomasz Kornuta <[email protected]>
tkornuta-nvidia committed May 22, 2020
1 parent 519b663 commit 56fc0b3
Showing 11 changed files with 77 additions and 52 deletions.
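The import reordering and line-wrapping below match what an import sorter plus an auto-formatter produce. A minimal sketch of reproducing such a pass, assuming isort and black are the tools and that the line length is configured to 119 (the wrap points in the hunks below suggest a limit around that value):

import subprocess

# Hypothetical reproduction of this commit's formatting pass; the tool choice
# and the 119-character line length are assumptions, not stated in the commit.
subprocess.run(["isort", "nemo"], check=True)  # sort imports alphabetically within sections
subprocess.run(["black", "--line-length", "119", "nemo"], check=True)  # normalize spacing, wrap long calls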
15 changes: 8 additions & 7 deletions nemo/collections/cv/examples/mnist_ffn_image_classification.py
@@ -16,16 +16,15 @@
 
 import argparse
 
-from torch import mean, stack, max, tensor
+from torch import max, mean, stack, tensor
 
-from nemo.utils import logging
 import nemo.utils.argparse as nm_argparse
-from nemo.core import NeuralModuleFactory, DeviceType, NeuralGraph, OperationMode, SimpleLossLoggerCallback
-
 from nemo.collections.cv.modules.data_layers.mnist_datalayer import MNISTDataLayer
 from nemo.collections.cv.modules.losses.nll_loss import NLLLoss
-from nemo.collections.cv.modules.trainables.feed_forward_network import FeedForwardNetwork
 from nemo.collections.cv.modules.non_trainables.reshape_tensor import ReshapeTensor
+from nemo.collections.cv.modules.trainables.feed_forward_network import FeedForwardNetwork
+from nemo.core import DeviceType, NeuralGraph, NeuralModuleFactory, OperationMode, SimpleLossLoggerCallback
+from nemo.utils import logging
 
 if __name__ == "__main__":
     # Create the default parser.
@@ -39,8 +38,10 @@
     # Data layers for training and validation.
     dl = MNISTDataLayer(height=28, width=28, train=True)
     # Model.
-    reshaper = ReshapeTensor(input_dims=[-1, 1, 32,32], output_dims=[-1, 784])
-    ffn = FeedForwardNetwork(input_size=784, output_size=10, hidden_sizes=[100, 100], dropout_rate=0.1, final_logsoftmax=True)
+    reshaper = ReshapeTensor(input_dims=[-1, 1, 32, 32], output_dims=[-1, 784])
+    ffn = FeedForwardNetwork(
+        input_size=784, output_size=10, hidden_sizes=[100, 100], dropout_rate=0.1, final_logsoftmax=True
+    )
     # Loss.
     nll_loss = NLLLoss()
 
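For context, the two statements rewrapped above build this example's model. A hedged sketch of how these modules are typically composed into a NeMo v0.x training graph follows; the wiring itself is outside the visible hunks, and the port and output names here are assumptions:

with NeuralGraph(operation_mode=OperationMode.training) as training_graph:
    imgs, tgts = dl()                                 # MNIST images and targets (names assumed)
    feats = reshaper(inputs=imgs)                     # [B, 1, 28, 28] -> [B, 784]
    preds = ffn(inputs=feats)                         # log-probabilities over 10 classes
    loss = nll_loss(predictions=preds, targets=tgts)  # scalar training loss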
14 changes: 10 additions & 4 deletions nemo/collections/cv/examples/mnist_lenet5_image_classification.py
@@ -16,15 +16,21 @@
 
 import argparse
 
-from torch import mean, stack, max, tensor
+from torch import max, mean, stack, tensor
 
-from nemo.utils import logging
 import nemo.utils.argparse as nm_argparse
-from nemo.core import NeuralModuleFactory, DeviceType, NeuralGraph, OperationMode, EvaluatorCallback, SimpleLossLoggerCallback
-
 from nemo.collections.cv.modules.data_layers.mnist_datalayer import MNISTDataLayer
 from nemo.collections.cv.modules.losses.nll_loss import NLLLoss
 from nemo.collections.cv.modules.trainables.lenet5 import LeNet5
+from nemo.core import (
+    DeviceType,
+    EvaluatorCallback,
+    NeuralGraph,
+    NeuralModuleFactory,
+    OperationMode,
+    SimpleLossLoggerCallback,
+)
+from nemo.utils import logging
 
 if __name__ == "__main__":
     # Create the default parser.
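The only change in this file is the import block; the newly wrapped nemo.core import shows that, unlike the FFN example, this one also registers an EvaluatorCallback. A hedged sketch of the callback pair such examples set up (tensor names and callback bodies are assumptions; they sit outside the hunk):

loss_callback = SimpleLossLoggerCallback(
    tensors=[loss], print_func=lambda x: logging.info("Training loss: {}".format(x[0].item()))
)
eval_callback = EvaluatorCallback(
    eval_tensors=[eval_loss],
    user_iter_callback=lambda values, global_dict: None,   # collect per-batch values
    user_epochs_done_callback=lambda global_dict: None,    # aggregate at epoch end
)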
2 changes: 1 addition & 1 deletion nemo/collections/cv/modules/__init__.py
@@ -16,5 +16,5 @@
 
 import nemo.collections.cv.modules.data_layers
 import nemo.collections.cv.modules.losses
-import nemo.collections.cv.modules.trainables
 import nemo.collections.cv.modules.non_trainables
+import nemo.collections.cv.modules.trainables
4 changes: 3 additions & 1 deletion nemo/collections/cv/modules/data_layers/mnist_datalayer.py
@@ -33,7 +33,9 @@ class MNISTDataLayer(DataLayerNM, MNIST):
     A "thin DataLayer" - wrapper around the torchvision's MNIST dataset.
     """
 
-    def __init__(self, name=None, height=28, width=28, data_folder="~/data/mnist", train=True, batch_size=64, shuffle=True):
+    def __init__(
+        self, name=None, height=28, width=28, data_folder="~/data/mnist", train=True, batch_size=64, shuffle=True
+    ):
         """
         Initializes the MNIST datalayer.
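The rewrapped __init__ keeps the same keyword interface, so existing call sites are unaffected; for instance, with the defaults from the signature above:

dl = MNISTDataLayer(height=28, width=28, data_folder="~/data/mnist", train=True, batch_size=64, shuffle=True)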
14 changes: 6 additions & 8 deletions nemo/collections/cv/modules/non_trainables/reshape_tensor.py
@@ -40,9 +40,9 @@
 
 from nemo.backends.pytorch.nm import NonTrainableNM
 from nemo.core.neural_types import NeuralType, VoidType
-from nemo.utils.decorators import add_port_docs
-from nemo.utils.configuration_error import ConfigurationError
 from nemo.utils import logging
+from nemo.utils.configuration_error import ConfigurationError
+from nemo.utils.decorators import add_port_docs
 
 __all__ = ['ReshapeTensor']

@@ -74,7 +74,7 @@ def input_ports(self):
         """
         return {
             "inputs": NeuralType(['B'] + ['ANY'] * (len(self._input_dims) - 1), VoidType())
-        } # TODO: set proper sizes.
+        }  # TODO: set proper sizes.
 
     @property
     @add_port_docs()
@@ -84,8 +84,7 @@ def output_ports(self):
         """
         return {
             "outputs": NeuralType(['B'] + ['ANY'] * (len(self._output_dims) - 1), VoidType())
-        } # TODO: set proper sizes of consecutive dimensions.
-
+        }  # TODO: set proper sizes of consecutive dimensions.
 
     def forward(self, inputs):
         """
@@ -98,8 +97,7 @@ def forward(self, inputs):
         Returns:
             Outputs a tensor [BATCH_SIZE x ...]
         """
-        #print("{}: input shape: {}, device: {}\n".format(self.name, inputs.shape, inputs.device))
+        # print("{}: input shape: {}, device: {}\n".format(self.name, inputs.shape, inputs.device))
 
         # Reshape.
-        return inputs.view(self._output_dims)
-
+        return inputs.view(self._output_dims)
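ReshapeTensor.forward reduces to a single tensor view; a minimal torch-only sketch of the semantics, using the output dimensions from the MNIST FFN example above:

import torch

batch = torch.zeros(64, 1, 28, 28)  # shaped like a batch from MNISTDataLayer
flat = batch.view([-1, 784])        # the same call as forward: inputs.view(self._output_dims)
assert flat.shape == (64, 784)      # batch dimension inferred from the -1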
2 changes: 1 addition & 1 deletion nemo/collections/cv/modules/trainables/__init__.py
@@ -14,5 +14,5 @@
 # limitations under the License.
 # =============================================================================
 
-from nemo.collections.cv.modules.trainables.lenet5 import *
 from nemo.collections.cv.modules.trainables.feed_forward_network import *
+from nemo.collections.cv.modules.trainables.lenet5 import *
67 changes: 40 additions & 27 deletions nemo/collections/cv/modules/trainables/feed_forward_network.py
@@ -39,16 +39,15 @@
 import torch
 
 from nemo.backends.pytorch.nm import TrainableNM
-from nemo.core.neural_types import NeuralType, LogprobsType, VoidType
-from nemo.utils.decorators import add_port_docs
-from nemo.utils.configuration_error import ConfigurationError
+from nemo.core.neural_types import LogprobsType, NeuralType, VoidType
 from nemo.utils import logging
-
+from nemo.utils.configuration_error import ConfigurationError
+from nemo.utils.decorators import add_port_docs
 
 __all__ = ['FeedForwardNetwork']
 
 
-class FeedForwardNetwork(TrainableNM):
+class FeedForwardNetwork(TrainableNM):
     """
     A simple trainable module consisting of several stacked fully connected layers
     with ReLU non-linearities and dropout between them.
@@ -57,7 +56,10 @@ class FeedForwardNetwork(TrainableNM):
     Additionally, the module applies log softmax non-linearity on the output of the last layer (logits).
     """
-    def __init__(self, input_size, output_size, hidden_sizes=[], dimensions=2, dropout_rate=0, final_logsoftmax=False, name=None):
+
+    def __init__(
+        self, input_size, output_size, hidden_sizes=[], dimensions=2, dropout_rate=0, final_logsoftmax=False, name=None
+    ):
         """
         Initializes the classifier.
@@ -85,9 +87,15 @@ def __init__(self, input_size, output_size, hidden_sizes=[], dimensions=2, dropo
         if len(self._output_size) == 1:
             self._output_size = self._output_size[0]
         else:
-            raise ConfigurationError("'output_size' must be a single value (received {})".format(self._output_size))
-
-        logging.info("Initializing network with input size = {} and output size = {}".format(self._input_size, self._output_size))
+            raise ConfigurationError(
+                "'output_size' must be a single value (received {})".format(self._output_size)
+            )
+
+        logging.info(
+            "Initializing network with input size = {} and output size = {}".format(
+                self._input_size, self._output_size
+            )
+        )
 
         # Create the module list.
         modules = []
@@ -98,42 +106,43 @@ def __init__(self, input_size, output_size, hidden_sizes=[], dimensions=2, dropo
             input_dim = self._input_size
             for hidden_dim in hidden_sizes:
                 # Add linear layer.
-                modules.append( torch.nn.Linear(input_dim, hidden_dim) )
+                modules.append(torch.nn.Linear(input_dim, hidden_dim))
                 # Add activation.
-                modules.append( torch.nn.ReLU() )
+                modules.append(torch.nn.ReLU())
                 # Add dropout.
-                if (dropout_rate > 0):
-                    modules.append( torch.nn.Dropout(dropout_rate) )
+                if dropout_rate > 0:
+                    modules.append(torch.nn.Dropout(dropout_rate))
                 # Remember size.
                 input_dim = hidden_dim
 
             # Add the last output" (or in a special case: the only) layer.
-            modules.append( torch.nn.Linear(input_dim, self._output_size) )
+            modules.append(torch.nn.Linear(input_dim, self._output_size))
 
             logging.info("Created {} hidden layers with sizes {}".format(len(hidden_sizes), hidden_sizes))
 
         else:
-            raise ConfigurationError("'hidden_sizes' must contain a list with numbers of neurons in consecutive hidden layers (received {})".format(hidden_sizes))
+            raise ConfigurationError(
+                "'hidden_sizes' must contain a list with numbers of neurons in consecutive hidden layers (received {})".format(
+                    hidden_sizes
+                )
+            )
 
         # Create the final non-linearity.
         self._final_logsoftmax = final_logsoftmax
         if self._final_logsoftmax:
-            modules.append( torch.nn.LogSoftmax(dim=1) )
+            modules.append(torch.nn.LogSoftmax(dim=1))
 
         # Finally create the sequential model out of those modules.
         self.layers = torch.nn.Sequential(*modules)
 
-
     @property
     @add_port_docs()
     def input_ports(self):
         """
         Returns definitions of module input ports.
         Batch of inputs, each represented as index [BATCH_SIZE x ... x INPUT_SIZE]
         """
-        return {
-            "inputs": NeuralType(['B'] + ['ANY'] * (self._dimensions - 1), VoidType())
-        }
+        return {"inputs": NeuralType(['B'] + ['ANY'] * (self._dimensions - 1), VoidType())}
 
     @property
     @add_port_docs()
@@ -160,13 +169,17 @@ def forward(self, inputs):
         Returns:
             Batch of outputs/predictions (log_probs) [BATCH_SIZE x ... x NUM_CLASSES]
         """
-        #print("{}: input shape: {}, device: {}\n".format(self.name, inputs.shape, inputs.device))
+        # print("{}: input shape: {}, device: {}\n".format(self.name, inputs.shape, inputs.device))
 
         # Check that the input has the number of dimensions that we expect
-        assert len(inputs.shape) == self._dimensions, \
-            "Expected " + str(self._dimensions) + " dimensions for input, got " + str(len(inputs.shape))\
-            + " instead. Check number of dimensions in the config."
+        assert len(inputs.shape) == self._dimensions, (
+            "Expected "
+            + str(self._dimensions)
+            + " dimensions for input, got "
+            + str(len(inputs.shape))
+            + " instead. Check number of dimensions in the config."
+        )
 
         # Reshape such that we do a broadcast over the last dimension
         origin_shape = inputs.shape
@@ -176,7 +189,7 @@ def forward(self, inputs):
         outputs = self.layers(inputs)
 
         # Restore the input dimensions but the last one (as it's been resized by the FFN)
-        outputs = outputs.view(*origin_shape[0:self._dimensions-1], -1)
+        outputs = outputs.view(*origin_shape[0 : self._dimensions - 1], -1)
 
         # Return the result.
-        return outputs
+        return outputs
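Stripped of the NeMo wrapper, the constructor above assembles a plain torch.nn.Sequential. A self-contained sketch with the hyperparameters used in the MNIST FFN example (input_size=784, output_size=10, hidden_sizes=[100, 100], dropout_rate=0.1, final_logsoftmax=True):

import torch

modules, input_dim = [], 784
for hidden_dim in [100, 100]:
    modules.append(torch.nn.Linear(input_dim, hidden_dim))  # linear layer
    modules.append(torch.nn.ReLU())                         # activation
    modules.append(torch.nn.Dropout(0.1))                   # dropout (rate > 0)
    input_dim = hidden_dim                                  # remember size
modules.append(torch.nn.Linear(input_dim, 10))              # last (output) layer
modules.append(torch.nn.LogSoftmax(dim=1))                  # final non-linearity
layers = torch.nn.Sequential(*modules)

log_probs = layers(torch.zeros(64, 784))                    # -> shape [64, 10]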
2 changes: 1 addition & 1 deletion nemo/collections/cv/modules/trainables/lenet5.py
@@ -18,7 +18,7 @@
 import torch
 
 from nemo.backends.pytorch.nm import TrainableNM
-from nemo.core.neural_types import NeuralType, AxisKind, AxisType, LogprobsType, NormalizedValueType
+from nemo.core.neural_types import AxisKind, AxisType, LogprobsType, NeuralType, NormalizedValueType
 from nemo.utils.decorators import add_port_docs
 
 __all__ = ['LeNet5']
3 changes: 2 additions & 1 deletion nemo/core/neural_types/elements.py
@@ -195,8 +195,9 @@ class CategoricalValuesType(PredictionsType):
 class MaskType(PredictionsType):
     """Element type to represent boolean mask"""
 
+
 class NormalizedValueType(ElementType):
     """
     Element type to represent a value normalized to <0-1> range,
     e.g. a single element (R) of normalized RGB image.
-    """
+    """
4 changes: 3 additions & 1 deletion nemo/core/neural_types/neural_type.py
@@ -120,7 +120,9 @@ def compare_and_raise_error(self, parent_type_name, port_name, second_object):
             type_comatibility != NeuralTypeComparisonResult.SAME
             and type_comatibility != NeuralTypeComparisonResult.GREATER
         ):
-            raise NeuralPortNmTensorMismatchError(parent_type_name, port_name, str(self), str(second_object), type_comatibility)
+            raise NeuralPortNmTensorMismatchError(
+                parent_type_name, port_name, str(self), str(second_object), type_comatibility
+            )
 
     @staticmethod
     def __check_sanity(axes):
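compare_and_raise_error accepts SAME and GREATER comparison results and raises for anything else. A hedged sketch of the equivalent guard, assuming NeuralType.compare is the comparison entry point and using hypothetical placeholder names:

result = expected_type.compare(produced_type)  # returns a NeuralTypeComparisonResult
if result not in (NeuralTypeComparisonResult.SAME, NeuralTypeComparisonResult.GREATER):
    raise NeuralPortNmTensorMismatchError(
        "MyGraph", "inputs", str(expected_type), str(produced_type), result
    )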
2 changes: 2 additions & 0 deletions nemo/utils/configuration_error.py
@@ -51,8 +51,10 @@
     https://github.com/IBM/pytorchpipe/blob/develop/ptp/configuration/configuration_error.py
 """
 
+
 class ConfigurationError(Exception):
     """ Error thrown when encountered a configuration issue. """
+
     def __init__(self, msg):
         """ Stores message """
         self.msg = msg
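ConfigurationError stores the message it is given; modules such as FeedForwardNetwork above raise it for invalid settings, and callers can read the text back from the exception:

try:
    raise ConfigurationError("'output_size' must be a single value (received [10, 10])")
except ConfigurationError as e:
    print(e.msg)  # the message is kept on the .msg attribute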
