Merge release/v1.2.0 to develop #5265

Merged: 19 commits into develop on Dec 13, 2024

Changes from 13 commits

Commits (19)
- 1594990  support on-disk instance segmentations in SDK (brimoor, Dec 11, 2024)
- 68eb682  handle nested roots (brimoor, Dec 12, 2024)
- 8331661  handle list fields (brimoor, Dec 12, 2024)
- 73da3ae  use buffers for hasFrame (#5264) (benjaminpkane, Dec 12, 2024)
- b0388a2  use heuristic for detecting grayscale images (sashankaryal, Dec 12, 2024)
- e7f3edd  bump version after release branch creation (findtopher, Dec 13, 2024)
- d570937  add 1% min (sashankaryal, Dec 13, 2024)
- e48511d  add clarifying comments (sashankaryal, Dec 13, 2024)
- d83c00a  fix rgb mask recoloring bug (sashankaryal, Dec 13, 2024)
- 64cf79b  Merge pull request #5256 from voxel51/on-disk-instances-updates (brimoor, Dec 13, 2024)
- 3956257  Merge branch 'merge/release/v1.2.0' of https://github.com/voxel51/fif… (voxel51-bot, Dec 13, 2024)
- 361556a  Merge branch 'release/v1.2.0' of https://github.com/voxel51/fiftyone … (voxel51-bot, Dec 13, 2024)
- 3053779  Sort Shuffle Stage in FfityOne App (#5270) (jnewb1, Dec 13, 2024)
- 79b8395  fix(ci): AS-359 Update Ubuntu24 Binaries For MongoDB (#5269) (afoley587, Dec 13, 2024)
- 8da1243  Sort Shuffle Stage in FfityOne App (#5270) (#5272) (findtopher, Dec 13, 2024)
- 76b0663  Merge branch 'merge/release/v1.2.0' of https://github.com/voxel51/fif… (voxel51-bot, Dec 13, 2024)
- 4019415  Merge branch 'release/v1.2.0' of https://github.com/voxel51/fiftyone … (voxel51-bot, Dec 13, 2024)
- 568da8a  Merge pull request #5266 from voxel51/fix/grayscale-segmentations (sashankaryal, Dec 13, 2024)
- 81336a0  Merge branch 'release/v1.2.0' of https://github.com/voxel51/fiftyone … (voxel51-bot, Dec 13, 2024)
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -4,7 +4,7 @@ on: workflow_call

jobs:
test-app:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
19 changes: 19 additions & 0 deletions app/packages/looker/src/lookers/utils.test.ts
@@ -0,0 +1,19 @@
import { describe, expect, it } from "vitest";
import type { Buffers } from "../state";
import { hasFrame } from "./utils";

describe("looker utilities", () => {
it("determines frame availability given a buffer list", () => {
const BUFFERS: Buffers = [
[1, 3],
[5, 25],
];
for (const frameNumber of [1, 10, 25]) {
expect(hasFrame(BUFFERS, frameNumber)).toBe(true);
}

for (const frameNumber of [0, 4, 26]) {
expect(hasFrame(BUFFERS, frameNumber)).toBe(false);
}
});
});
7 changes: 7 additions & 0 deletions app/packages/looker/src/lookers/utils.ts
@@ -0,0 +1,7 @@
import type { Buffers } from "../state";

export const hasFrame = (buffers: Buffers, frameNumber: number) => {
return buffers.some(
([start, end]) => start <= frameNumber && frameNumber <= end
);
};
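
For reference, the new helper treats each buffer as an inclusive [start, end] range of frame numbers and reports a frame as available if any range contains it. A minimal Python sketch of the same interval check (the actual implementation is the TypeScript above; the Python names here are illustrative):

from typing import List, Tuple

# Inclusive [start, end] frame ranges, mirroring the Buffers type above
Buffers = List[Tuple[int, int]]

def has_frame(buffers: Buffers, frame_number: int) -> bool:
    # A frame is available if any buffered range contains it
    return any(start <= frame_number <= end for start, end in buffers)

# Mirrors the vitest cases above
assert has_frame([(1, 3), (5, 25)], 10)
assert not has_frame([(1, 3), (5, 25)], 4)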
9 changes: 2 additions & 7 deletions app/packages/looker/src/lookers/video.ts
@@ -19,6 +19,7 @@ import { addToBuffers, removeFromBuffers } from "../util";
import { AbstractLooker } from "./abstract";
import { type Frame, acquireReader, clearReader } from "./frame-reader";
import { LookerUtils, withFrames } from "./shared";
import { hasFrame } from "./utils";

let LOOKER_WITH_READER: VideoLooker | null = null;

@@ -394,13 +395,7 @@ export class VideoLooker extends AbstractLooker<VideoState, VideoSample> {
}

private hasFrame(frameNumber: number) {
if (frameNumber === this.firstFrameNumber) {
return this.firstFrame;
}
return (
this.frames.has(frameNumber) &&
this.frames.get(frameNumber)?.deref() !== undefined
);
return hasFrame(this.state.buffers, frameNumber);
}

private getFrame(frameNumber: number) {
8 changes: 5 additions & 3 deletions docs/source/user_guide/using_datasets.rst
@@ -2542,7 +2542,7 @@ Object detections stored in |Detections| may also have instance segmentation
masks.

These masks can be stored in one of two ways: either directly in the database
via the :attr:`mask<fiftyone.core.labels.Detection.mask>` attribute, or on
via the :attr:`mask <fiftyone.core.labels.Detection.mask>` attribute, or on
disk referenced by the
:attr:`mask_path <fiftyone.core.labels.Detection.mask_path>` attribute.

@@ -2605,8 +2605,10 @@ object's bounding box when visualizing in the App.
<Detection: {
'id': '5f8709282018186b6ef6682b',
'attributes': {},
'tags': [],
'label': 'cat',
'bounding_box': [0.48, 0.513, 0.397, 0.288],
'mask': None,
'mask_path': '/path/to/mask.png',
'confidence': 0.96,
'index': None,
@@ -2615,8 +2617,8 @@ object's bounding box when visualizing in the App.
}>,
}>

Like all |Label| types, you can also add custom attributes to your detections
by dynamically adding new fields to each |Detection| instance:
Like all |Label| types, you can also add custom attributes to your instance
segmentations by dynamically adding new fields to each |Detection| instance:

.. code-block:: python
:linenos:
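
The Python snippet under the directive above is truncated in this diff view. A minimal sketch of the workflow the documentation describes, i.e. referencing an on-disk instance mask via mask_path and then attaching a custom attribute dynamically (the paths and attribute values here are placeholders, not taken from the diff):

import fiftyone as fo

# An instance segmentation whose mask lives on disk (single-channel PNG)
detection = fo.Detection(
    label="cat",
    bounding_box=[0.48, 0.513, 0.397, 0.288],
    mask_path="/path/to/mask.png",
)

# Dynamically add a custom attribute to the instance segmentation
detection["mood"] = "surly"  # placeholder attribute name and value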
2 changes: 1 addition & 1 deletion fiftyone/__public__.py
@@ -215,7 +215,6 @@
MatchLabels,
MatchTags,
Mongo,
Shuffle,
Select,
SelectBy,
SelectFields,
@@ -224,6 +223,7 @@
SelectGroupSlices,
SelectLabels,
SetField,
Shuffle,
Skip,
SortBy,
SortBySimilarity,
2 changes: 1 addition & 1 deletion fiftyone/constants.py
@@ -42,7 +42,7 @@
# This setting may be ``None`` if this client has no compatibility with other
# versions
#
COMPATIBLE_VERSIONS = ">=0.19,<1.3"
COMPATIBLE_VERSIONS = ">=0.19,<1.4"

# Package metadata
_META = metadata("fiftyone")
46 changes: 33 additions & 13 deletions fiftyone/core/collections.py
Original file line number Diff line number Diff line change
@@ -10662,9 +10662,7 @@ def _handle_db_fields(self, paths, frames=False):
db_fields_map = self._get_db_fields_map(frames=frames)
return [db_fields_map.get(p, p) for p in paths]

def _get_media_fields(
self, include_filepath=True, whitelist=None, frames=False
):
def _get_media_fields(self, whitelist=None, blacklist=None, frames=False):
media_fields = {}

if frames:
@@ -10674,13 +10672,13 @@ def _get_media_fields(
schema = self.get_field_schema(flat=True)
app_media_fields = set(self._dataset.app_config.media_fields)

if include_filepath:
# 'filepath' should already be in set, but add it just in case
app_media_fields.add("filepath")
else:
app_media_fields.discard("filepath")
# 'filepath' should already be in set, but add it just in case
app_media_fields.add("filepath")

for field_name, field in schema.items():
while isinstance(field, fof.ListField):
field = field.field

if field_name in app_media_fields:
media_fields[field_name] = None
elif isinstance(field, fof.EmbeddedDocumentField) and issubclass(
@@ -10695,14 +10693,28 @@ def _get_media_fields(
whitelist = {whitelist}

media_fields = {
k: v for k, v in media_fields.items() if k in whitelist
k: v
for k, v in media_fields.items()
if any(w == k or k.startswith(w + ".") for w in whitelist)
}

if blacklist is not None:
if etau.is_container(blacklist):
blacklist = set(blacklist)
else:
blacklist = {blacklist}

media_fields = {
k: v
for k, v in media_fields.items()
if not any(w == k or k.startswith(w + ".") for w in blacklist)
}

return media_fields

def _resolve_media_field(self, media_field):
def _parse_media_field(self, media_field):
if media_field in self._dataset.app_config.media_fields:
return media_field
return media_field, None

_media_field, is_frame_field = self._handle_frame_field(media_field)

@@ -10711,12 +10723,20 @@ def _resolve_media_field(self, media_field):
if leaf is not None:
leaf = root + "." + leaf

if _media_field in (root, leaf):
if _media_field in (root, leaf) or root.startswith(
_media_field + "."
):
_resolved_field = leaf if leaf is not None else root
if is_frame_field:
_resolved_field = self._FRAMES_PREFIX + _resolved_field

return _resolved_field
_list_fields = self._parse_field_name(
_resolved_field, auto_unwind=False
)[-2]
if _list_fields:
return _resolved_field, _list_fields[0]

return _resolved_field, None

raise ValueError("'%s' is not a valid media field" % media_field)
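
The new whitelist/blacklist handling in _get_media_fields() keeps a field when it equals a pattern or is nested beneath one, then drops anything matching the blacklist the same way. A standalone sketch of that dot-prefix matching rule (field names here are illustrative, not from the diff):

def _matches(field, patterns):
    # True if `field` equals a pattern or is nested under it (dot-delimited)
    return any(field == p or field.startswith(p + ".") for p in patterns)

media_fields = ["filepath", "thumbnail_path", "ground_truth.detections.mask_path"]

# With blacklist="filepath" (as the exporters now pass), 'filepath' is dropped
kept = [f for f in media_fields if not _matches(f, {"filepath"})]
print(kept)  # ['thumbnail_path', 'ground_truth.detections.mask_path']

# With a whitelist, only that field and its nested children survive
kept = [f for f in media_fields if _matches(f, {"ground_truth"})]
print(kept)  # ['ground_truth.detections.mask_path']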

7 changes: 4 additions & 3 deletions fiftyone/core/labels.py
@@ -409,7 +409,8 @@ class Detection(_HasAttributesDict, _HasID, _HasMedia, Label):
its bounding box, which should be a 2D binary or 0/1 integer numpy
array
mask_path (None): the absolute path to the instance segmentation image
on disk
on disk, which should be a single-channel PNG image where any
non-zero values represent the instance's extent
confidence (None): a confidence in ``[0, 1]`` for the detection
index (None): an index for the object
attributes ({}): a dict mapping attribute names to :class:`Attribute`
@@ -532,8 +533,8 @@ def to_segmentation(self, mask=None, frame_size=None, target=255):
"""
if not self.has_mask:
raise ValueError(
"Only detections with their `mask` attributes populated can "
"be converted to segmentations"
"Only detections with their `mask` or `mask_path` attribute "
"populated can be converted to segmentations"
)

mask, target = _parse_segmentation_target(mask, frame_size, target)
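
Per the updated docstring and error message, to_segmentation() can now be called on a detection whose mask is stored on disk via mask_path rather than in the database. A minimal usage sketch (the detection values and frame size are placeholders):

import fiftyone as fo

detection = fo.Detection(
    label="cat",
    bounding_box=[0.48, 0.513, 0.397, 0.288],
    mask_path="/path/to/mask.png",  # on-disk instance mask
)

# Signature from the diff: to_segmentation(mask=None, frame_size=None, target=255)
segmentation = detection.to_segmentation(frame_size=(640, 480), target=255)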
2 changes: 1 addition & 1 deletion fiftyone/core/stages.py
@@ -8628,7 +8628,6 @@ def repr_ViewExpression(self, expr, level):
MatchLabels,
MatchTags,
Mongo,
Shuffle,
Select,
SelectBy,
SelectFields,
@@ -8637,6 +8636,7 @@ def repr_ViewExpression(self, expr, level):
SelectGroupSlices,
SelectLabels,
SetField,
Shuffle,
Skip,
SortBy,
SortBySimilarity,
106 changes: 61 additions & 45 deletions fiftyone/utils/data/exporters.py
@@ -12,11 +12,13 @@
import warnings
from collections import defaultdict

from bson import json_util
import pydash

import eta.core.datasets as etad
import eta.core.frameutils as etaf
import eta.core.serial as etas
import eta.core.utils as etau
from bson import json_util

import fiftyone as fo
import fiftyone.core.collections as foc
@@ -1892,7 +1894,7 @@ def log_collection(self, sample_collection):
self._metadata["frame_fields"] = schema

self._media_fields = sample_collection._get_media_fields(
include_filepath=False
blacklist="filepath",
)

info = dict(sample_collection.info)
@@ -2029,34 +2031,38 @@ def _export_frame_labels(self, sample, uuid):

def _export_media_fields(self, sd):
for field_name, key in self._media_fields.items():
value = sd.get(field_name, None)
if value is None:
continue

if key is not None:
self._export_media_field(value, field_name, key=key)
else:
self._export_media_field(sd, field_name)
self._export_media_field(sd, field_name, key=key)

def _export_media_field(self, d, field_name, key=None):
if key is not None:
value = d.get(key, None)
else:
key = field_name
value = d.get(field_name, None)

value = pydash.get(d, field_name, None)
if value is None:
return

media_exporter = self._get_media_field_exporter(field_name)
outpath, _ = media_exporter.export(value)

if self.abs_paths:
d[key] = outpath
else:
d[key] = fou.safe_relpath(
outpath, self.export_dir, default=outpath
)
if not isinstance(value, (list, tuple)):
value = [value]

for _d in value:
if key is not None:
_value = _d.get(key, None)
else:
_value = _d

if _value is None:
continue

outpath, _ = media_exporter.export(_value)

if not self.abs_paths:
outpath = fou.safe_relpath(
outpath, self.export_dir, default=outpath
)

if key is not None:
_d[key] = outpath
else:
pydash.set_(d, field_name, outpath)

def _get_media_field_exporter(self, field_name):
media_exporter = self._media_field_exporters.get(field_name, None)
@@ -2196,7 +2202,7 @@ def export_samples(self, sample_collection, progress=None):
_sample_collection = sample_collection

self._media_fields = sample_collection._get_media_fields(
include_filepath=False
blacklist="filepath"
)

logger.info("Exporting samples...")
@@ -2333,33 +2339,43 @@ def _prep_sample(sd):

def _export_media_fields(self, sd):
for field_name, key in self._media_fields.items():
value = sd.get(field_name, None)
if value is None:
continue
self._export_media_field(sd, field_name, key=key)

def _export_media_field(self, d, field_name, key=None):
value = pydash.get(d, field_name, None)
if value is None:
return

media_exporter = self._get_media_field_exporter(field_name)

if not isinstance(value, (list, tuple)):
value = [value]

for _d in value:
if key is not None:
self._export_media_field(value, field_name, key=key)
_value = _d.get(key, None)
else:
self._export_media_field(sd, field_name)
_value = _d

def _export_media_field(self, d, field_name, key=None):
if key is not None:
value = d.get(key, None)
else:
key = field_name
value = d.get(field_name, None)
if _value is None:
continue

if value is None:
return
if self.export_media is not False:
# Store relative path
_, uuid = media_exporter.export(_value)
outpath = os.path.join("fields", field_name, uuid)
elif self.rel_dir is not None:
# Remove `rel_dir` prefix from path
outpath = fou.safe_relpath(
_value, self.rel_dir, default=_value
)
else:
continue

if self.export_media is not False:
# Store relative path
media_exporter = self._get_media_field_exporter(field_name)
_, uuid = media_exporter.export(value)
d[key] = os.path.join("fields", field_name, uuid)
elif self.rel_dir is not None:
# Remove `rel_dir` prefix from path
d[key] = fou.safe_relpath(value, self.rel_dir, default=value)
if key is not None:
_d[key] = outpath
else:
pydash.set_(d, field_name, outpath)

def _get_media_field_exporter(self, field_name):
media_exporter = self._media_field_exporters.get(field_name, None)
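
The rewritten export helpers read nested media paths with pydash.get(), iterate when the value is a list (e.g. a media path inside a list field), and write updated paths back with pydash.set_(). A small sketch of that read/rewrite pattern on a plain dict (the field name and paths are illustrative):

import pydash

sd = {"ground_truth": {"mask_path": "/absolute/path/to/mask.png"}}

# Read the nested value by dotted path
value = pydash.get(sd, "ground_truth.mask_path", None)

if value is not None:
    # Rewrite it, e.g. as the relative path an exporter would store
    pydash.set_(sd, "ground_truth.mask_path", "fields/ground_truth.mask_path/mask.png")

print(sd["ground_truth"]["mask_path"])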