diff --git a/webknossos/Changelog.md b/webknossos/Changelog.md index 780a1d9b9..85ae857f5 100644 --- a/webknossos/Changelog.md +++ b/webknossos/Changelog.md @@ -13,7 +13,12 @@ For upgrade instructions, please check the respective *Breaking Changes* section - `wk.Graph` now inherits from `networkx.Graph` directly. Therefore, the `nx_graph` attribute is removed. [#481](https://github.com/scalableminds/webknossos-libs/pull/481) ### Added +- Added `redownsample()` method to `Layer` to recompute existing downsampled magnifications. [#461](https://github.com/scalableminds/webknossos-libs/pull/461) +- Added `globalize_floodfill.py` script to globalize partially computed flood fill operations. [#461](https://github.com/scalableminds/webknossos-libs/pull/461) + ### Changed +- Improved performance for calculations with `Vec3Int` and `BoundingBox`. [#461](https://github.com/scalableminds/webknossos-libs/pull/461) + ### Fixed diff --git a/webknossos/script-collection/globalize_floodfill.py b/webknossos/script-collection/globalize_floodfill.py new file mode 100755 index 000000000..65c774c38 --- /dev/null +++ b/webknossos/script-collection/globalize_floodfill.py @@ -0,0 +1,356 @@ +import argparse +import logging +import os +import re +import textwrap +from collections import namedtuple +from contextlib import contextmanager +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Iterator, List, Set, Tuple + +import numpy as np + +import webknossos as wk +from webknossos.dataset import Layer, MagView +from webknossos.geometry import BoundingBox, Mag, Vec3Int +from webknossos.utils import add_verbose_flag, setup_logging, time_start, time_stop + +logger = logging.getLogger(__name__) + + +NEIGHBORS = [ + Vec3Int(1, 0, 0), + Vec3Int(-1, 0, 0), + Vec3Int(0, 1, 0), + Vec3Int(0, -1, 0), + Vec3Int(0, 0, 1), + Vec3Int(0, 0, -1), +] + +FloodFillBbox = namedtuple( + "FloodFillBbox", + ["bounding_box", "seed_position", "source_id", "target_id", "timestamp"], +) + + +def create_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description=textwrap.dedent( + """\ + Example usage: + The following invocation will create a new dataset at "some/path/new_dataset" + which will be a shallow copy of "existing/dataset" with the exception that + the "segmentation" layer will have the volume data from "annotation/data" + merged in. Additionally, the partial flood-fills which are denoted in + "explorational.nml" will be continued/globalized. 
+ + python -m script-collection.globalize_floodfill \\ + --output_path some/path/new_dataset \\ + --segmentation_layer_path existing/dataset/segmentation \\ + --volume_path annotation/data \\ + --nml_path explorational.nml + """ + ), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + + parser.add_argument( + "--volume_path", + "-v", + help="Directory containing the volume tracing.", + type=Path, + required=True, + ) + + parser.add_argument( + "--segmentation_layer_path", + "-s", + help="Directory containing the segmentation layer.", + type=Path, + required=True, + ) + + parser.add_argument( + "--nml_path", + "-n", + help="NML that contains the bounding boxes", + type=Path, + required=True, + ) + + parser.add_argument( + "--output_path", "-o", help="Output directory", type=Path, required=True + ) + + add_verbose_flag(parser) + + return parser + + +def get_chunk_pos_and_offset( + global_position: Vec3Int, chunk_shape: Vec3Int +) -> Tuple[Vec3Int, Vec3Int]: + offset = global_position % chunk_shape + return ( + global_position - offset, + offset, + ) + + +def execute_floodfill( + data_mag: MagView, + seed_position: Vec3Int, + already_processed_bbox: BoundingBox, + source_id: int, + target_id: int, +) -> None: + cube_size = Vec3Int.full(data_mag.header.file_len * data_mag.header.block_len) + cube_bbox = BoundingBox(Vec3Int(0, 0, 0), cube_size) + chunk_with_relative_seed: List[Tuple[Vec3Int, Vec3Int]] = [ + get_chunk_pos_and_offset(seed_position, cube_size) + ] + + # The `is_visited` variable is used to know which parts of the already processed bbox + # were already traversed. Outside of that bounding box, the actual data already + # is an indicator of whether the flood-fill has reached a voxel. + is_visited = np.zeros(already_processed_bbox.size.to_tuple(), dtype=np.uint8) + chunk_count = 0 + + while len(chunk_with_relative_seed) > 0: + chunk_count += 1 + if chunk_count % 10000 == 0: + logger.info(f"Handled seed positions {chunk_count}") + + dirty_bucket = False + current_cube, relative_seed = chunk_with_relative_seed.pop() + global_seed = current_cube + relative_seed + + # Only reading one voxel for the seed can be up to 30,000 times faster + # which is very relevent, since the chunk doesn't need to be traversed + # if the seed voxel was already covered. 
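+        # Note: data_mag.read() yields a (channel, x, y, z)-shaped array (compare the
+        # cube_data[0, :, :, :] indexing below), so this call fetches only a single voxel.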
+ value_at_seed_position = data_mag.read(current_cube + relative_seed, (1, 1, 1)) + + if value_at_seed_position == source_id or ( + already_processed_bbox.contains(global_seed) + and value_at_seed_position == target_id + and not is_visited[global_seed - already_processed_bbox.topleft] + ): + logger.info( + f"Handling chunk {chunk_count} with current cube {current_cube}" + ) + time_start("read data") + cube_data = data_mag.read(current_cube, cube_size) + cube_data = cube_data[0, :, :, :] + time_stop("read data") + + seeds_in_current_chunk: Set[Vec3Int] = set() + seeds_in_current_chunk.add(relative_seed) + + time_start("traverse cube") + while len(seeds_in_current_chunk) > 0: + current_relative_seed = seeds_in_current_chunk.pop() + current_global_seed = current_cube + current_relative_seed + if already_processed_bbox.contains(current_global_seed): + is_visited[current_global_seed - already_processed_bbox.topleft] = 1 + + if cube_data[current_relative_seed] != target_id: + cube_data[current_relative_seed] = target_id + dirty_bucket = True + + # check neighbors + for neighbor in NEIGHBORS: + neighbor_pos = current_relative_seed + neighbor + + global_neighbor_pos = current_cube + neighbor_pos + if already_processed_bbox.contains(global_neighbor_pos): + if is_visited[ + global_neighbor_pos - already_processed_bbox.topleft + ]: + continue + if cube_bbox.contains(neighbor_pos): + if cube_data[neighbor_pos] == source_id or ( + already_processed_bbox.contains(global_neighbor_pos) + and cube_data[neighbor_pos] == target_id + ): + seeds_in_current_chunk.add(neighbor_pos) + else: + chunk_with_relative_seed.append( + get_chunk_pos_and_offset(global_neighbor_pos, cube_size) + ) + time_stop("traverse cube") + + if dirty_bucket: + time_start("write chunk") + data_mag.write(cube_data, current_cube) + time_stop("write chunk") + + +@contextmanager +def temporary_annotation_view(volume_annotation_path: Path) -> Iterator[Layer]: + + """ + Given a volume annotation path, create a temporary dataset which + contains the volume annotation via a symlink. Yield the layer + so that one can work with the annotation as a wk.Dataset. + """ + + with TemporaryDirectory() as tmp_annotation_dir: + tmp_annotation_dataset_path = ( + Path(tmp_annotation_dir) / "tmp_annotation_dataset" + ) + + input_annotation_dataset = wk.Dataset.get_or_create( + str(tmp_annotation_dataset_path), scale=(1, 1, 1) + ) + + # Ideally, the following code would be used, but there are two problems: + # - save_volume_annotation cannot deal with the + # new named volume annotation layers, yet + # - save_volume_annotation tries to read the entire data (beginning from 0, 0, 0) + # to infer the largest_segment_id which can easily exceed the available RAM. + # + # volume_annotation = open_annotation(volume_annotation_path) + # input_annotation_layer = volume_annotation.save_volume_annotation( + # input_annotation_dataset, "volume_annotation" + # ) + + os.symlink(volume_annotation_path, tmp_annotation_dataset_path / "segmentation") + input_annotation_layer = input_annotation_dataset.add_layer_for_existing_files( + layer_name="segmentation", + category="segmentation", + largest_segment_id=0, # This is incorrect, but for globalize_floodfill not relevant. 
+ ) + + yield input_annotation_layer + + +def merge_with_fallback_layer( + output_path: Path, + volume_annotation_path: Path, + segmentation_layer_path: Path, +) -> MagView: + + assert not output_path.exists(), f"Dataset at {output_path} already exists" + + # Prepare output dataset by creatign a shallow copy of the dataset + # determined by segmentation_layer_path, but do a deep copy of + # segmentation_layer_path itself (so that we can mutate it). + input_segmentation_dataset = wk.Dataset(segmentation_layer_path.parent) + time_start("Prepare output dataset") + output_dataset = input_segmentation_dataset.shallow_copy_dataset( + output_path, + name=output_path.name, + make_relative=True, + layers_to_ignore=[segmentation_layer_path.name], + ) + output_layer = output_dataset.add_copy_layer( + segmentation_layer_path, segmentation_layer_path.name + ) + time_stop("Prepare output dataset") + + input_segmentation_mag = input_segmentation_dataset.get_layer( + segmentation_layer_path.name + ).get_best_mag() + with temporary_annotation_view(volume_annotation_path) as input_annotation_layer: + input_annotation_mag = input_annotation_layer.get_best_mag() + bboxes = list( + BoundingBox.from_tuple2(tuple) + for tuple in input_annotation_mag.get_bounding_boxes_on_disk() + ) + output_mag = output_layer.get_mag(input_segmentation_mag.mag) + + cube_size = output_mag.header.file_len * output_mag.header.block_len + chunks_with_bboxes = BoundingBox.group_boxes_with_aligned_mag( + bboxes, Mag(cube_size) + ) + + assert ( + input_annotation_mag.header.file_len == 1 + ), "volume annotation must have file_len=1" + assert ( + input_annotation_mag.header.voxel_type + == input_segmentation_mag.header.voxel_type + ), "Volume annotation must have same dtype as fallback layer" + + chunk_count = 0 + for chunk, bboxes in chunks_with_bboxes.items(): + chunk_count += 1 + logger.info(f"Processing chunk {chunk_count}...") + + time_start("Read chunk") + data_buffer = output_mag.read(chunk.topleft, chunk.size)[0, :, :, :] + time_stop("Read chunk") + + time_start("Read/merge bboxes") + for bbox in bboxes: + read_data = input_annotation_mag.read(bbox.topleft, bbox.size) + data_buffer[bbox.offset(-chunk.topleft).to_slices()] = read_data + time_stop("Read/merge bboxes") + + time_start("Write chunk") + output_mag.write(data_buffer, chunk.topleft) + time_stop("Write chunk") + return output_mag + + +def main(args: argparse.Namespace) -> None: + + # Use the skeleton API to read the bounding boxes once + # https://github.com/scalableminds/webknossos-libs/issues/482 is done. 
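+    # The regex below has to yield ten capture groups per flood-fill bounding box:
+    # source_id, target_id, seed, timestamp, top_left_x/y/z, width, height and depth
+    # (see the tuple unpacking further down).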
+ nml_regex = re.compile( + r'' + ) + + bboxes: List[FloodFillBbox] = [] + nml_file = open(args.nml_path, "r", encoding="utf-8") + lines = nml_file.readlines() + nml_file.close() + for line in lines: + matches = nml_regex.findall(line) + for match in matches: + # each match is a tuple of (source_id, target_id, seed, timestamp, top_left_x, top_left_y, top_left_z, width, height, depth + bboxes.append( + FloodFillBbox( + bounding_box=BoundingBox( + (match[4], match[5], match[6]), (match[7], match[8], match[9]) + ), + seed_position=Vec3Int(match[2].split(",")), + source_id=int(match[0]), + target_id=int(match[1]), + timestamp=int(match[3]), + ) + ) + bboxes = sorted(bboxes, key=lambda x: x.timestamp) + + time_start("Merge with fallback layer") + data_mag = merge_with_fallback_layer( + args.output_path, + args.volume_path, + args.segmentation_layer_path, + ) + time_stop("Merge with fallback layer") + + time_start("All floodfills") + for floodfill in bboxes: + time_start("Floodfill") + execute_floodfill( + data_mag, + floodfill.seed_position, + floodfill.bounding_box, + floodfill.source_id, + floodfill.target_id, + ) + time_stop("Floodfill") + time_stop("All floodfills") + + time_start("Recompute downsampled mags") + data_mag.layer.redownsample() + time_stop("Recompute downsampled mags") + + +if __name__ == "__main__": + parsed_args = create_parser().parse_args() + setup_logging(parsed_args) + + main(parsed_args) diff --git a/webknossos/webknossos/annotation/annotation.py b/webknossos/webknossos/annotation/annotation.py index 696c4dd45..f1cbd5ab1 100644 --- a/webknossos/webknossos/annotation/annotation.py +++ b/webknossos/webknossos/annotation/annotation.py @@ -59,6 +59,8 @@ def dataset_name(self) -> str: def save_volume_annotation( self, dataset: Dataset, layer_name: str = "volume_annotation" ) -> Layer: + # todo pylint: disable=fixme + # the name is about to change with multiple volume annotations assert "data.zip" in self._filelist with self._zipfile.open("data.zip") as f: data_zip = ZipFile(f) @@ -78,6 +80,9 @@ def save_volume_annotation( ), ) min_mag_view = layer.mags[min(layer.mags)] + # todo pylint: disable=fixme + # this tries to read the entire DS into memory (beginning from 0, 0, 0). + # if the annotation begins at some other point, this will blow up the RAM unnecessarily. layer.largest_segment_id = int(min_mag_view.read().max()) return layer diff --git a/webknossos/webknossos/dataset/dataset.py b/webknossos/webknossos/dataset/dataset.py index 79b157bab..529821f35 100644 --- a/webknossos/webknossos/dataset/dataset.py +++ b/webknossos/webknossos/dataset/dataset.py @@ -7,7 +7,7 @@ from os.path import basename, join, normpath from pathlib import Path from shutil import rmtree -from typing import Any, Dict, Optional, Tuple, Union, cast +from typing import Any, Dict, List, Optional, Tuple, Union, cast import attr import numpy as np @@ -324,6 +324,7 @@ def add_layer_for_existing_files( ) -> Layer: assert layer_name not in self.layers, f"Layer {layer_name} already exists!" mag_headers = list((self.path / layer_name).glob("*/header.wkw")) + assert ( len(mag_headers) != 0 ), f"Could not find any header.wkw files in {self.path / layer_name}, cannot add layer." @@ -536,6 +537,7 @@ def shallow_copy_dataset( new_dataset_path: Path, name: Optional[str] = None, make_relative: bool = False, + layers_to_ignore: Optional[List[str]] = None, ) -> "Dataset": """ Create a new dataset at the given path. Link all mags of all existing layers. 
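+        Layers listed in `layers_to_ignore` are skipped entirely (neither copied nor linked).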
@@ -547,6 +549,8 @@ def shallow_copy_dataset( new_dataset_path, scale=self.scale, name=name or self.name ) for layer_name, layer in self.layers.items(): + if layers_to_ignore is not None and layer_name in layers_to_ignore: + continue new_layer = new_dataset.add_layer_like(layer, layer_name) for mag_view in layer.mags.values(): new_layer.add_symlink_mag(mag_view, make_relative) diff --git a/webknossos/webknossos/dataset/layer.py b/webknossos/webknossos/dataset/layer.py index b2d3a6a08..b59dae4c6 100644 --- a/webknossos/webknossos/dataset/layer.py +++ b/webknossos/webknossos/dataset/layer.py @@ -255,6 +255,10 @@ def get_mag(self, mag: Union[int, str, list, tuple, np.ndarray, Mag]) -> MagView ) return self.mags[mag] + def get_best_mag(self) -> MagView: + + return self.get_mag(min(self.mags.keys())) + def add_mag( self, mag: Union[int, str, list, tuple, np.ndarray, Mag], @@ -604,6 +608,7 @@ def downsample_mag( compress: bool = True, buffer_edge_len: Optional[int] = None, args: Optional[Namespace] = None, + allow_overwrite: bool = False, ) -> None: """ Performs a single downsampling step from `from_mag` to `target_mag`. @@ -616,6 +621,7 @@ def downsample_mag( - "bicubic" The `args` can contain information to distribute the computation. + If allow_overwrite is True, an existing Mag may be overwritten. """ assert ( from_mag in self.mags.keys() @@ -626,7 +632,9 @@ def downsample_mag( ) assert from_mag <= target_mag - assert target_mag not in self.mags + assert ( + allow_overwrite or target_mag not in self.mags + ), "The target mag already exists. Pass allow_overwrite=True if you want to overwrite it." prev_mag_view = self.mags[from_mag] @@ -634,10 +642,13 @@ def downsample_mag( t // s for (t, s) in zip(target_mag.to_list(), from_mag.to_list()) ] - # initialize the new mag - target_mag_view = self._initialize_mag_from_other_mag( - target_mag, prev_mag_view, compress - ) + if target_mag in self.mags.keys() and allow_overwrite: + target_mag_view = self.get_mag(target_mag) + else: + # initialize the new mag + target_mag_view = self._initialize_mag_from_other_mag( + target_mag, prev_mag_view, compress + ) bb_mag1 = self.bounding_box @@ -691,6 +702,34 @@ def downsample_mag( logging.info("Mag {0} successfully cubed".format(target_mag)) + def redownsample( + self, + interpolation_mode: str = "default", + compress: bool = True, + buffer_edge_len: Optional[int] = None, + args: Optional[Namespace] = None, + ) -> None: + """ + Use this method to recompute downsampled magnifications after mutating data in the + base magnification. + """ + + mags = sorted(self.mags.keys(), key=lambda m: m.to_list()) + if len(mags) <= 1: + # No downsampled magnifications exist. Thus, there's nothing to do. + return + from_mag = mags[0] + target_mags = mags[1:] + self.downsample_mag_list( + from_mag, + target_mags, + interpolation_mode, + compress, + buffer_edge_len, + args, + allow_overwrite=True, + ) + def downsample_mag_list( self, from_mag: Mag, @@ -699,6 +738,7 @@ def downsample_mag_list( compress: bool = True, buffer_edge_len: Optional[int] = None, args: Optional[Namespace] = None, + allow_overwrite: bool = False, ) -> None: """ Downsamples the data starting at `from_mag` to each magnification in `target_mags` iteratively. 
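+        If `allow_overwrite` is True, existing target mags may be overwritten
+        (this is what `redownsample()` relies on).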
@@ -729,6 +769,7 @@ def downsample_mag_list( compress=compress, buffer_edge_len=buffer_edge_len, args=args, + allow_overwrite=allow_overwrite, ) source_mag = target_mag diff --git a/webknossos/webknossos/geometry/bounding_box.py b/webknossos/webknossos/geometry/bounding_box.py index 49ca0f7a5..502e7bf86 100644 --- a/webknossos/webknossos/geometry/bounding_box.py +++ b/webknossos/webknossos/geometry/bounding_box.py @@ -1,5 +1,6 @@ import json import re +from collections import defaultdict from typing import ( Dict, Generator, @@ -24,10 +25,12 @@ class BoundingBox: topleft: Vec3Int = attr.ib(converter=Vec3Int) size: Vec3Int = attr.ib(converter=Vec3Int) - @property - def bottomright(self) -> Vec3Int: + bottomright = attr.ib(init=False) - return self.topleft + self.size + def __attrs_post_init__(self) -> None: + # Compute bottomright to avoid that it's recomputed every time + # it is needed. + object.__setattr__(self, "bottomright", self.topleft + self.size) def with_topleft(self, new_topleft: Vec3IntLike) -> "BoundingBox": @@ -156,6 +159,25 @@ def from_auto( raise Exception("Unknown bounding box format.") + @staticmethod + def group_boxes_with_aligned_mag( + bounding_boxes: Iterable["BoundingBox"], aligning_mag: Mag + ) -> Dict["BoundingBox", List["BoundingBox"]]: + """ + Groups the given BoundingBox instances by aligning each + bbox to the given mag and using that as the key. + For example, bounding boxes of size 256**3 could be grouped + into the corresponding 1024**3 chunks to which they belong + by using aligning_mag = Mag(1024). + """ + + chunks_with_bboxes = defaultdict(list) + for bbox in bounding_boxes: + chunk_key = bbox.align_with_mag(aligning_mag, ceil=True) + chunks_with_bboxes[chunk_key].append(bbox) + + return chunks_with_bboxes + def to_wkw_dict(self) -> dict: ( # pylint: disable=unbalanced-tuple-unpacking @@ -303,13 +325,20 @@ def contains(self, coord: Union[Vec3IntLike, np.ndarray]) -> bool: assert coord.shape == ( 3, ), f"Numpy array BoundingBox.contains must have shape (3,), got {coord.shape}." + return cast( + bool, + np.all(coord >= self.topleft) and np.all(coord < self.bottomright), + ) else: - coord = Vec3Int(coord).to_np() - - return cast( - bool, - np.all(coord >= self.topleft) and np.all(coord < self.topleft + self.size), - ) + # In earlier versions, we simply converted to ndarray to have + # a unified calculation here, but this turned out to be a performance bottleneck. + # Therefore, the contains-check is performed on the tuple here. 
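+            # The check is half-open, mirroring the numpy branch above:
+            # topleft is inclusive, bottomright is exclusive.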
+ coord = Vec3Int(coord) + return ( + self.topleft[0] <= coord[0] < self.bottomright[0] + and self.topleft[1] <= coord[1] < self.bottomright[1] + and self.topleft[2] <= coord[2] < self.bottomright[2] + ) def contains_bbox(self, inner_bbox: "BoundingBox") -> bool: return inner_bbox.intersected_with(self, dont_assert=True) == inner_bbox @@ -350,7 +379,6 @@ def chunk( for z in range( start[2] - start_adjust[2], start[2] + self.size[2], chunk_size[2] ): - yield BoundingBox([x, y, z], chunk_size).intersected_with(self) def volume(self) -> int: @@ -376,6 +404,9 @@ def offset(self, vector: Vec3IntLike) -> "BoundingBox": return BoundingBox(self.topleft + Vec3Int(vector), self.size) + def __hash__(self) -> int: + return hash(self.to_tuple6()) + class BoundingBoxNamedTuple(NamedTuple): topleft: Tuple[int, int, int] diff --git a/webknossos/webknossos/geometry/vec3_int.py b/webknossos/webknossos/geometry/vec3_int.py index 68dd37092..04fbd9c8d 100644 --- a/webknossos/webknossos/geometry/vec3_int.py +++ b/webknossos/webknossos/geometry/vec3_int.py @@ -3,6 +3,8 @@ import numpy as np +value_error = "Vector components must be three integers or a Vec3IntLike object." + class Vec3Int(tuple): def __new__( @@ -15,7 +17,6 @@ def __new__( return vec as_tuple: Optional[Tuple[int, int, int]] = None - value_error = f"Vector components must be three integers or a Vec3IntLike object, got {vec}, {y}, {z}" if isinstance(vec, int): assert y is not None and z is not None, value_error @@ -27,7 +28,7 @@ def __new__( assert np.count_nonzero(vec % 1) == 0, value_error assert vec.shape == ( 3, - ), f"Numpy array for Vec3Int must have shape (3,), got {vec.shape}." + ), "Numpy array for Vec3Int must have shape (3,)." if isinstance(vec, Iterable): as_tuple = cast(Tuple[int, int, int], tuple(int(item) for item in vec)) assert len(as_tuple) == 3, value_error @@ -35,6 +36,14 @@ def __new__( return super().__new__(cls, cast(Iterable, as_tuple)) + @staticmethod + def from_xyz(x: int, y: int, z: int) -> "Vec3Int": + """Use Vec3Int.from_xyz for fast construction.""" + + # By calling __new__ of tuple directly, we circumvent + # the tolerant (and potentially) slow Vec3Int.__new__ method. + return tuple.__new__(Vec3Int, (x, y, z)) + @property def x(self) -> int: return self[0] @@ -48,13 +57,13 @@ def z(self) -> int: return self[2] def with_x(self, new_x: int) -> "Vec3Int": - return Vec3Int(new_x, self.y, self.z) + return Vec3Int.from_xyz(new_x, self.y, self.z) def with_y(self, new_y: int) -> "Vec3Int": - return Vec3Int(self.x, new_y, self.z) + return Vec3Int.from_xyz(self.x, new_y, self.z) def with_z(self, new_z: int) -> "Vec3Int": - return Vec3Int(self.x, self.y, new_z) + return Vec3Int.from_xyz(self.x, self.y, new_z) def to_np(self) -> np.ndarray: return np.array((self.x, self.y, self.z)) @@ -72,15 +81,13 @@ def _element_wise( self, other: Union[int, "Vec3IntLike"], fn: Callable[[int, Any], int] ) -> "Vec3Int": if isinstance(other, int): - other_imported = Vec3Int(other, other, other) + other_imported = Vec3Int.from_xyz(other, other, other) else: other_imported = Vec3Int(other) - return Vec3Int( - ( - fn(self.x, other_imported.x), - fn(self.y, other_imported.y), - fn(self.z, other_imported.z), - ) + return Vec3Int.from_xyz( + fn(self.x, other_imported.x), + fn(self.y, other_imported.y), + fn(self.z, other_imported.z), ) # note: (arguments incompatible with superclass, do not add Vec3Int to plain tuple! 
Hence the type:ignore) @@ -100,7 +107,7 @@ def __mod__(self, other: Union[int, "Vec3IntLike"]) -> "Vec3Int": return self._element_wise(other, mod) def __neg__(self) -> "Vec3Int": - return Vec3Int(-self.x, -self.y, -self.z) + return Vec3Int.from_xyz(-self.x, -self.y, -self.z) def ceildiv(self, other: Union[int, "Vec3IntLike"]) -> "Vec3Int": return (self + other - 1) // other diff --git a/webknossos/webknossos/utils.py b/webknossos/webknossos/utils.py index 011fd9af3..b9c64ce19 100644 --- a/webknossos/webknossos/utils.py +++ b/webknossos/webknossos/utils.py @@ -126,3 +126,18 @@ def copy_directory_with_symlinks( else: rel_or_abspath = os.path.abspath(item) symlink_path.symlink_to(rel_or_abspath) + + +def setup_logging(args: argparse.Namespace) -> None: + logging.basicConfig( + level=(logging.DEBUG if args.verbose else logging.INFO), + format="%(asctime)s %(levelname)s %(message)s", + ) + + +def add_verbose_flag(parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "--silent", help="Silent output", dest="verbose", action="store_false" + ) + + parser.set_defaults(verbose=True) diff --git a/wkcuber/wkcuber/utils.py b/wkcuber/wkcuber/utils.py index 9326a3aba..7daf006d0 100644 --- a/wkcuber/wkcuber/utils.py +++ b/wkcuber/wkcuber/utils.py @@ -73,14 +73,6 @@ def open_knossos(info: KnossosDatasetInfo) -> KnossosDataset: return KnossosDataset.open(info.dataset_path, np.dtype(info.dtype)) -def add_verbose_flag(parser: argparse.ArgumentParser) -> None: - parser.add_argument( - "--silent", help="Silent output", dest="verbose", action="store_false" - ) - - parser.set_defaults(verbose=True) - - def add_scale_flag(parser: argparse.ArgumentParser, required: bool = True) -> None: parser.add_argument( "--scale", @@ -140,13 +132,6 @@ def get_channel_and_sample_iters_for_wk_compatibility( return (range(channel_count), range(sample_count)) -def setup_logging(args: argparse.Namespace) -> None: - logging.basicConfig( - level=(logging.DEBUG if args.verbose else logging.INFO), - format="%(asctime)s %(levelname)s %(message)s", - ) - - def find_files( source_path: str, extensions: Iterable[str] ) -> Generator[str, Any, None]:
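To make the intended workflow of these additions concrete, here is a minimal sketch of how the new `get_best_mag()`, `redownsample()` and `layers_to_ignore` APIs fit together. It is an illustration only: the dataset path, layer name, chunk size and segment ids are placeholders, and the call signatures are taken from the diff above.

    from pathlib import Path

    import webknossos as wk
    from webknossos.geometry import Vec3Int

    # Open an existing dataset and pick its segmentation layer (names are placeholders).
    dataset = wk.Dataset(Path("path/to/dataset"))
    layer = dataset.get_layer("segmentation")

    # get_best_mag() returns the finest available magnification of the layer.
    mag_view = layer.get_best_mag()

    # Read a small chunk, relabel one segment id, and write the result back.
    offset, size = Vec3Int(0, 0, 0), Vec3Int.full(64)
    chunk = mag_view.read(offset, size)[0, :, :, :]  # drop the channel axis, as in the script above
    chunk[chunk == 7] = 42  # placeholder source/target ids
    mag_view.write(chunk, offset)

    # After mutating the base magnification, bring all coarser mags back in sync.
    layer.redownsample()

    # A shallow copy of the dataset can now skip individual layers via layers_to_ignore.
    copy = dataset.shallow_copy_dataset(
        Path("path/to/copy"), make_relative=True, layers_to_ignore=["segmentation"]
    )

The performance-related changes follow the same pattern internally: `Vec3Int.from_xyz()` bypasses the tolerant `Vec3Int.__new__()` on hot code paths, and `BoundingBox` now precomputes `bottomright` and implements `__hash__`, which is what allows bounding boxes to serve as dictionary keys in `group_boxes_with_aligned_mag()`.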