feat: fix latent pylint errors in many source files #115

Merged

3 changes: 2 additions & 1 deletion dgp/agents/agent_2d.py
@@ -46,12 +46,13 @@ def load(cls, agent_snapshots_pb2, ontology, feature_ontology_table):
ontology: Ontology
Ontology for given agent.

feature_ontology_table: dict, default: None
feature_ontology_table: dict, optional
A dictionary mapping feature type key(s) to Ontology(s), i.e.:
{
"agent_2d": AgentFeatureOntology[<ontology_sha>],
"agent_3d": AgentFeatureOntology[<ontology_sha>]
}
Default: None.

Returns
-------
22 changes: 15 additions & 7 deletions dgp/agents/agent_3d.py
@@ -47,7 +47,7 @@ def load(cls, agent_snapshots_pb2, ontology, feature_ontology_table):
ontology: Ontology
Ontology for given agent.

feature_ontology_table: dict, default: None
feature_ontology_table: dict
A dictionary mapping feature type key(s) to Ontology(s), i.e.:
{
"agent_2d": AgentFeatureOntology[<ontology_sha>],
@@ -94,17 +94,25 @@ def render(self, image, camera, line_thickness=2, font_scale=0.5):

Parameters
----------
image: np.uint8 array
Image (H, W, C) to render the bounding box onto. We assume the input image is in *RGB* format
image: np.ndarray
Image (H, W, C) to render the bounding box onto. We assume the input image is in *RGB* format.
Data type is uint8.

camera: dgp.utils.camera.Camera
Camera used to render the bounding box.

line_thickness: int, default: 2
Thickness of bounding box lines.
line_thickness: int, optional
Thickness of bounding box lines. Default: 2.

font_scale: float, default: 0.5
Font scale used in text labels.
font_scale: float, optional
Font scale used in text labels. Default: 0.5.

Raises
------
ValueError
Raised if `image` is not a 3-channel uint8 numpy array.
TypeError
Raised if `camera` is not an instance of Camera.
"""
if (
not isinstance(image, np.ndarray) or image.dtype != np.uint8 or len(image.shape) != 3 or image.shape[2] != 3
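
Note on the check that begins above: per the new Raises section, render() only accepts a 3-channel uint8 RGB image and a dgp.utils.camera.Camera. A minimal sketch of a valid input, with `agent` and `camera` assumed to already exist (hypothetical names, not from this diff):

    import numpy as np

    # an (H, W, 3) uint8 canvas satisfies the isinstance/dtype/shape check above
    image = np.zeros((720, 1280, 3), dtype=np.uint8)
    # agent.render(image, camera, line_thickness=2, font_scale=0.5)  # agent/camera assumed

A float image or a single-channel (H, W) array would raise ValueError; passing anything other than a Camera would raise TypeError.
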
7 changes: 4 additions & 3 deletions dgp/agents/base_agent.py
@@ -32,18 +32,19 @@ def load(cls, agent_snapshots_pb2, ontology, feature_ontology_table):

Parameters
----------
agent_snapshots_pb2: agent proto object
A proto message holding agent information.
agent_snapshots_pb2: object
An agent proto message holding agent information.

ontology: Ontology
Ontology for given agent.

feature_ontology_table: dict, default: None
feature_ontology_table: dict, optional
A dictionary mapping feature type key(s) to Ontology(s), i.e.:
{
"agent_2d": AgentFeatureOntology[<ontology_sha>],
"agent_3d": AgentFeatureOntology[<ontology_sha>]
}
Default: None.
"""

@abstractmethod
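
The feature_ontology_table that these load() classmethods take is a plain dict keyed by feature type, mirroring the structure quoted in the docstrings. A hypothetical sketch (the ontology variables are assumed, not taken from this diff):

    feature_ontology_table = {
        "agent_2d": agent_2d_feature_ontology,  # an AgentFeatureOntology instance (assumed)
        "agent_3d": agent_3d_feature_ontology,
    }
    # passed as the third argument to load(agent_snapshots_pb2, ontology, feature_ontology_table)
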
2 changes: 1 addition & 1 deletion dgp/annotations/base_annotation.py
@@ -43,7 +43,7 @@ def save(self, save_dir):
"""Serialize annotation object if possible, and saved to specified directory.
Annotations are saved in format <save_dir>/<sha>.<ext>

Paramaters
Parameters
----------
save_dir: str
Path to directory to saved annotation
38 changes: 31 additions & 7 deletions dgp/annotations/bounding_box_3d_annotation.py
@@ -88,6 +88,11 @@ def to_proto(self):
def save(self, save_dir):
"""Serialize Annotation object and saved to specified directory. Annotations are saved in format <save_dir>/<sha>.<ext>

Parameters
----------
save_dir: str
A pathname to a directory to save the annotation object into.

Returns
-------
output_annotation_file: str
@@ -107,17 +112,25 @@ def render(self, image, camera, line_thickness=2, font_scale=0.5):

Parameters
----------
image: np.uint8 array
Image (H, W, C) to render the bounding box onto. We assume the input image is in *RGB* format
image: np.uint8
Image (H, W, C) to render the bounding box onto. We assume the input image is in *RGB* format.
Element type must be uint8.

camera: dgp.utils.camera.Camera
Camera used to render the bounding box.

line_thickness: int, default: 2
Thickness of bounding box lines.
line_thickness: int, optional
Thickness of bounding box lines. Default: 2.

font_scale: float, default: 0.5
Font scale used in text labels.
font_scale: float, optional
Font scale used in text labels. Default: 0.5.

Raises
------
ValueError
Raised if image is not a 3-channel uint8 numpy array.
TypeError
Raised if camera is not an instance of Camera.
"""
if (
not isinstance(image, np.ndarray) or image.dtype != np.uint8 or len(image.shape) != 3 or image.shape[2] != 3
@@ -165,5 +178,16 @@ def hexdigest(self):
return generate_uid_from_pbobject(self.to_proto())

def project(self, camera):
"""Project bounding boxes into a camera and get back 2D bounding boxes in the frustum."""
"""Project bounding boxes into a camera and get back 2D bounding boxes in the frustum.

Parameters
----------
camera: Camera
The Camera instance to project into.

Raises
------
NotImplementedError
Unconditionally.
"""
raise NotImplementedError
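
Most additions in this PR follow the pattern visible in the two docstrings above: a NumPy-style Raises section listing each exception type with its condition, which is what pylint's docparams checks (e.g. missing-raises-doc) look for. A generic sketch on a hypothetical function:

    def load_pose(path):
        """Load a pose file (hypothetical example).

        Parameters
        ----------
        path: str
            Path to the pose file.

        Raises
        ------
        FileNotFoundError
            Raised if `path` does not exist.
        """
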
2 changes: 1 addition & 1 deletion dgp/annotations/depth_annotation.py
@@ -49,7 +49,7 @@ def save(self, save_dir):
"""Serialize annotation object if possible, and saved to specified directory.
Annotations are saved in format <save_dir>/<sha>.<ext>

Paramaters
Parameters
----------
save_dir: str
Path to directory to saved annotation
7 changes: 7 additions & 0 deletions dgp/annotations/ontology.py
@@ -70,6 +70,13 @@ def load(cls, ontology_file):
----------
ontology_file: str
Path to ontology JSON

Raises
------
FileNotFoundError
Raised if ontology_file does not exist.
Exception
Raised if we could not open the ontology file for some reason.
"""
if os.path.exists(ontology_file):
ontology_pb2 = open_ontology_pbobject(ontology_file)
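
A caller-side sketch of the two failure modes the new Raises section documents (the path is made up, and the enclosing class is assumed to be Ontology):

    try:
        ontology = Ontology.load('/path/to/ontology.json')
    except FileNotFoundError:
        ...  # the ontology JSON does not exist on disk
    except Exception:
        ...  # the file exists but could not be opened as an ontology
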
18 changes: 12 additions & 6 deletions dgp/annotations/panoptic_segmentation_2d_annotation.py
@@ -1,5 +1,6 @@
# Copyright 2021 Toyota Research Institute. All rights reserved.
import json
import locale
import os

import cv2
@@ -98,6 +99,11 @@ def parse_panoptic_image(self):
-------
instance_masks: list[InstanceMask2D]
Instance mask for each instance in panoptic annotation.

Raises
------
ValueError
Raised if an instance ID, parsed from a label, is negative.
"""
instance_masks = []
for class_name, labels in self.index_to_label.items():
@@ -181,8 +187,8 @@ def load(cls, annotation_file, ontology, panoptic_image_dtype=DEFAULT_PANOPTIC_I
ontology: Ontology
Ontology for given annotation

panoptic_image_dtype: type, default: np.uint16
Numpy data type (e.g. np.uint16, np.uint32, etc) of panoptic image.
panoptic_image_dtype: type, optional
Numpy data type (e.g. np.uint16, np.uint32, etc) of panoptic image. Default: np.uint16.
"""
panoptic_image = cv2.imread(annotation_file, cv2.IMREAD_UNCHANGED)
if len(panoptic_image.shape) == 3:
@@ -191,7 +197,7 @@ def load(cls, annotation_file, ontology, panoptic_image_dtype=DEFAULT_PANOPTIC_I
_L = panoptic_image
label_map = _L[:, :, 2] + 256 * _L[:, :, 1] + 256 * 256 * _L[:, :, 0]
panoptic_image = label_map.astype(panoptic_image_dtype)
with open('{}.json'.format(os.path.splitext(annotation_file)[0])) as _f:
with open('{}.json'.format(os.path.splitext(annotation_file)[0]), encoding=locale.getpreferredencoding()) as _f:
index_to_label = json.load(_f)
return cls(ontology, panoptic_image, index_to_label, panoptic_image_dtype)

@@ -219,8 +225,8 @@ def from_masklist(cls, masklist, ontology, mask_shape=None, panoptic_image_dtype
mask_shape: list[int]
Height and width of the mask. Only used to create an empty panoptic image when masklist is empty.

panoptic_image_dtype: type, default: np.uint16
Numpy data type (e.g. np.uint16, np.uint32, etc) of panoptic image.
panoptic_image_dtype: type, optional
Numpy data type (e.g. np.uint16, np.uint32, etc) of panoptic image. Default: np.uint16.
"""
if not masklist:
panoptic_image = np.ones(mask_shape, panoptic_image_dtype) * ontology.VOID_ID
@@ -283,7 +289,7 @@ def save(self, save_dir, datum=None):
cv2.imwrite(panoptic_image_path, self.panoptic_image)

index_to_label_path = '{}.json'.format(os.path.splitext(panoptic_image_path)[0])
with open(index_to_label_path, 'w') as _f:
with open(index_to_label_path, 'w', encoding=locale.getpreferredencoding()) as _f:
json.dump(self.index_to_label, _f)

return panoptic_image_path
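
On the label_map line above: cv2.imread returns channels in BGR order, so _L[:, :, 2] is red, and the expression packs each pixel's (R, G, B) triple into a single integer, R + 256*G + 256*256*B, before casting to panoptic_image_dtype. A small self-contained sketch (values are illustrative; the up-front cast to uint32 only keeps this sketch free of uint8 overflow):

    import numpy as np

    _L = np.zeros((1, 1, 3), dtype=np.uint8)
    _L[0, 0] = (1, 2, 3)  # B=1, G=2, R=3, as cv2.imread would load them
    _L32 = _L.astype(np.uint32)
    label_map = _L32[:, :, 2] + 256 * _L32[:, :, 1] + 256 * 256 * _L32[:, :, 0]
    assert label_map[0, 0] == 3 + 256 * 2 + 256 * 256 * 1  # == 66051

The open(..., encoding=locale.getpreferredencoding()) additions in this file make the platform-default text encoding explicit, which is what pylint's unspecified-encoding warning asks for.
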
10 changes: 5 additions & 5 deletions dgp/annotations/transform_utils.py
@@ -18,7 +18,7 @@ def remap_bounding_box_annotations(bounding_box_annotations, lookup_table, origi
bounding_box_annotations: BoundingBox2DAnnotationList or BoundingBox3DAnnotationList
Annotations to remap

lookup: dict
lookup_table: dict
Lookup from old class names to new class names
e.g.:
{
@@ -60,10 +60,10 @@ def remap_semantic_segmentation_2d_annotation(
"""
Parameters
----------
semantic_segmentation_2d_annotation: SemanticSegmentation2DAnnotation
semantic_segmentation_annotation: SemanticSegmentation2DAnnotation
Annotation to remap

lookup: dict
lookup_table: dict
Lookup from old class names to new class names
e.g.:
{
@@ -103,10 +103,10 @@ def remap_instance_segmentation_2d_annotation(
"""
Parameters
----------
instance_segmentation_2d_annotation: PanopticSegmentation2DAnnotation
instance_segmentation_annotation: PanopticSegmentation2DAnnotation
Annotation to remap

lookup: dict
lookup_table: dict
Lookup from old class names to new class names
e.g.:
{
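
An illustrative lookup_table for the remap helpers in this file (the class names here are invented; per the assertion in transforms.py below, the values must be valid class names in the remapped ontology):

    lookup_table = {
        'Car': 'Vehicle',
        'Truck': 'Vehicle',
    }
    remapped = remap_bounding_box_annotations(
        bounding_box_annotations, lookup_table, original_ontology, remapped_ontology
    )
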
29 changes: 17 additions & 12 deletions dgp/annotations/transforms.py
@@ -153,41 +153,46 @@ def __init__(self, original_ontology_table, lookup_table, remapped_ontology_tabl
class_name in self.remapped_ontology_table[annotation_key].class_names for class_name in lookup.values()
]), 'All values in `lookup` need to be valid class names in specified `remapped_ontology`'

def transform_datum(self, data):
def transform_datum(self, datum):
"""
Parameters
----------
data: OrderedDict
datum: OrderedDict
Dictionary containing raw data and annotations, with keys such as:
'rgb', 'intrinsics', 'bounding_box_2d'.
All annotation_keys in `self.lookup_table` (and `self.remapped_ontology_table`)
are expected to be contained

Returns
-------
data: OrderedDict
datum: OrderedDict
Same dictionary but with annotations in `self.lookup_table` remapped to desired ontologies

Raises
------
ValueError
Raised if the datum to remap does not contain all expected annotations.
"""
if not all([annotation_key in data for annotation_key in self.remapped_ontology_table]):
if not all([annotation_key in datum for annotation_key in self.remapped_ontology_table]):
raise ValueError('The data you are trying to remap does not have all annotations it expects')

for annotation_key, remapped_ontology in self.remapped_ontology_table.items():

lookup_table = self.lookup_table[annotation_key]
original_ontology = data[annotation_key].ontology
original_ontology = datum[annotation_key].ontology

# Need to have specific handlers for each annotation type
if annotation_key == 'bounding_box_2d' or annotation_key == 'bounding_box_3d':
data[annotation_key] = remap_bounding_box_annotations(
data[annotation_key], lookup_table, original_ontology, remapped_ontology
datum[annotation_key] = remap_bounding_box_annotations(
datum[annotation_key], lookup_table, original_ontology, remapped_ontology
)
elif annotation_key == 'semantic_segmentation_2d':
data[annotation_key] = remap_semantic_segmentation_2d_annotation(
data[annotation_key], lookup_table, original_ontology, remapped_ontology
datum[annotation_key] = remap_semantic_segmentation_2d_annotation(
datum[annotation_key], lookup_table, original_ontology, remapped_ontology
)
elif annotation_key == 'instance_segmentation_2d':
data[annotation_key] = remap_instance_segmentation_2d_annotation(
data[annotation_key], lookup_table, original_ontology, remapped_ontology
datum[annotation_key] = remap_instance_segmentation_2d_annotation(
datum[annotation_key], lookup_table, original_ontology, remapped_ontology
)

return data
return datum
10 changes: 10 additions & 0 deletions dgp/annotations/visibility_filter_transform.py
@@ -45,6 +45,11 @@ def transform_sample(self, sample):
-------
new_sample: list[OrderedDict]
Multimodal sample with all detection annotations are filtered.

Raises
------
ValueError
Raised if a 2D or 3D bounding box instance lacks any required instance IDs.
"""
cam_datums = [datum for datum in sample if datum['datum_name'] in self._camera_datum_names]

@@ -189,6 +194,11 @@ def transform_sample(self, sample):
-------
new_sample: list[OrderedDict]
Multimodal sample with updated 3D bounding box annotations.

Raises
------
ValueError
Raised if there are multiple instances of the same kind of datum in a sample.
"""
# Mapping index to datum. The order of datums is preserved in output.
datums, src_datum_inds, dst_datum_ind = OrderedDict(), [], []
4 changes: 2 additions & 2 deletions dgp/cli.py
@@ -94,7 +94,7 @@ def cli():
@cli.command(name='visualize-scene')
@add_options(options=VISUALIZE_OPTIONS)
@click.option("--scene-json", required=True, help="Path to Scene JSON")
def visualize_scene(
def visualize_scene( # pylint: disable=missing-any-param-doc
scene_json, annotations, camera_datum_names, dataset_class, show_instance_id, max_num_items, video_fps, dst_dir,
verbose, lidar_datum_names, render_pointcloud, radar_datum_names, render_radar_pointcloud, render_raw
):
@@ -216,7 +216,7 @@ def visualize_scene(
help="Dataset split to be fetched."
)
@add_options(options=VISUALIZE_OPTIONS)
def visualize_scenes(
def visualize_scenes( # pylint: disable=missing-any-param-doc
scene_dataset_json, split, annotations, camera_datum_names, dataset_class, show_instance_id, max_num_items,
video_fps, dst_dir, verbose, lidar_datum_names, render_pointcloud, radar_datum_names, render_radar_pointcloud,
render_raw
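
The `# pylint: disable=missing-any-param-doc` comments added to the two Click commands are statement-level disables: attached to the def line, they silence that one check for the decorated function only, not for the whole module. A sketch of the same pattern on a hypothetical command:

    @cli.command(name='export-scene')
    def export_scene(scene_json, dst_dir):  # pylint: disable=missing-any-param-doc
        ...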