From 2f5bb477e3c9adb96cef4cb8cc9d7e78aebb8e01 Mon Sep 17 00:00:00 2001
From: Sander Roet
Date: Wed, 2 Oct 2024 17:25:46 +0200
Subject: [PATCH] Drop py 39 and upgrade (#230)

* go to py 3.10 following SPEC0
* auto-fix with 'ruff check --extend-select=UP --fix'
* deal with unsafe fixes
---
 pyproject.toml | 2 +-
 src/pytom_tm/angles.py | 5 ++--
 src/pytom_tm/correlation.py | 33 +++++++++++------------
 src/pytom_tm/extract.py | 7 +++--
 src/pytom_tm/io.py | 53 +++++++++++++++----------------------
 src/pytom_tm/mask.py | 9 +++----
 src/pytom_tm/matching.py | 11 ++++----
 src/pytom_tm/template.py | 5 ++--
 src/pytom_tm/tmjob.py | 31 +++++++++++-----------
 src/pytom_tm/utils.py | 2 +-
 src/pytom_tm/weights.py | 29 ++++++++++----------
 11 files changed, 85 insertions(+), 102 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 7993bee7..eed5a1ab 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -23,7 +23,7 @@ classifiers = [
     "Topic :: Scientific/Engineering",
     "Typing :: Typed"
 ]
-requires-python = ">= 3.9"
+requires-python = ">= 3.10"
 dependencies = [
     "numpy",
     "cupy!=13.0.*", #see https://github.com/SBC-Utrecht/pytom-match-pick/issues/106
diff --git a/src/pytom_tm/angles.py b/src/pytom_tm/angles.py
index a5a83ab0..8874a466 100644
--- a/src/pytom_tm/angles.py
+++ b/src/pytom_tm/angles.py
@@ -1,6 +1,5 @@
 import pathlib
 import os
-from typing import Union
 from scipy.spatial.transform import Rotation
 import numpy as np
 import healpix as hp
@@ -94,7 +93,7 @@ def load_angle_list(
 def get_angle_list(
-    angle: Union[pathlib.Path, float],
+    angle: pathlib.Path | float,
     sort_angles: bool = True,
     symmetry: int = 1,
     log_level: int = logging.DEBUG,
@@ -135,7 +134,7 @@
             f"Will generate an angle list with a maximum increment of {angle}",
         )
         out = angle_to_angle_list(angle, sort_angles, log_level)
-    elif isinstance(angle, (str, os.PathLike)):
+    elif isinstance(angle, str | os.PathLike):
         possible_file_path = pathlib.Path(angle)
         if possible_file_path.exists() and possible_file_path.suffix == ".txt":
             logging.log(
diff --git a/src/pytom_tm/correlation.py b/src/pytom_tm/correlation.py
index a50f7c95..9569da3a 100644
--- a/src/pytom_tm/correlation.py
+++ b/src/pytom_tm/correlation.py
@@ -4,14 +4,13 @@
 import numpy.typing as npt
 import cupy.typing as cpt
-from typing import Optional, Union
 def mean_under_mask(
-    data: Union[npt.NDArray[float], cpt.NDArray[float]],
-    mask: Union[npt.NDArray[float], cpt.NDArray[float]],
-    mask_weight: Optional[float] = None,
-) -> Union[float, cpt.NDArray[float]]:
+    data: npt.NDArray[float] | cpt.NDArray[float],
+    mask: npt.NDArray[float] | cpt.NDArray[float],
+    mask_weight: float | None = None,
+) -> float | cpt.NDArray[float]:
     """Calculate mean of array in the mask region.
     data and mask can be cupy or numpy arrays.
@@ -37,11 +36,11 @@ def std_under_mask(
-    data: Union[npt.NDArray[float], cpt.NDArray[float]],
-    mask: Union[npt.NDArray[float], cpt.NDArray[float]],
+    data: npt.NDArray[float] | cpt.NDArray[float],
+    mask: npt.NDArray[float] | cpt.NDArray[float],
     mean: float,
-    mask_weight: Optional[float] = None,
-) -> Union[float, cpt.NDArray[float]]:
+    mask_weight: float | None = None,
+) -> float | cpt.NDArray[float]:
     """Calculate standard deviation of array in the mask region.
     Uses mean_under_mask() to calculate the mean of data**2 within the mask.
@@ -68,10 +67,10 @@ def std_under_mask(
 def normalise(
-    data: Union[npt.NDArray[float], cpt.NDArray[float]],
-    mask: Optional[Union[npt.NDArray[float], cpt.NDArray[float]]] = None,
-    mask_weight: Optional[float] = None,
-) -> Union[npt.NDArray[float], cpt.NDArray[float]]:
+    data: npt.NDArray[float] | cpt.NDArray[float],
+    mask: npt.NDArray[float] | cpt.NDArray[float] | None = None,
+    mask_weight: float | None = None,
+) -> npt.NDArray[float] | cpt.NDArray[float]:
     """Normalise array by subtracting mean and dividing by standard deviation.
     If a mask is provided the array is normalised with the mean and std
     calculated within the mask.
@@ -102,10 +101,10 @@ def normalised_cross_correlation(
-    data1: Union[npt.NDArray[float], cpt.NDArray[float]],
-    data2: Union[npt.NDArray[float], cpt.NDArray[float]],
-    mask: Optional[Union[npt.NDArray[float], cpt.NDArray[float]]] = None,
-) -> Union[float, cpt.NDArray[float]]:
+    data1: npt.NDArray[float] | cpt.NDArray[float],
+    data2: npt.NDArray[float] | cpt.NDArray[float],
+    mask: npt.NDArray[float] | cpt.NDArray[float] | None = None,
+) -> float | cpt.NDArray[float]:
     """Calculate normalised cross correlation between two arrays. Optionally only
     in a masked region.
diff --git a/src/pytom_tm/extract.py b/src/pytom_tm/extract.py
index c8ef50c3..42b2ade2 100644
--- a/src/pytom_tm/extract.py
+++ b/src/pytom_tm/extract.py
@@ -5,7 +5,6 @@
 import logging
 import scipy.ndimage as ndimage
 import pathlib
-from typing import Optional
 from pytom_tm.tmjob import TMJob
 from pytom_tm.mask import spherical_mask
 from pytom_tm.angles import get_angle_list, convert_euler
@@ -28,7 +27,7 @@ def predict_tophat_mask(
     score_volume: npt.NDArray[float],
-    output_path: Optional[pathlib.Path] = None,
+    output_path: pathlib.Path | None = None,
     n_false_positives: float = 1.0,
     create_plot: bool = True,
     tophat_connectivity: int = 1,
@@ -148,9 +147,9 @@ def extract_particles(
     job: TMJob,
     particle_radius_px: int,
     n_particles: int,
-    cut_off: Optional[float] = None,
+    cut_off: float | None = None,
     n_false_positives: float = 1.0,
-    tomogram_mask_path: Optional[pathlib.Path] = None,
+    tomogram_mask_path: pathlib.Path | None = None,
     tophat_filter: bool = False,
     create_plot: bool = True,
     tophat_connectivity: int = 1,
diff --git a/src/pytom_tm/io.py b/src/pytom_tm/io.py
index 0a1e8aab..4c38f7d5 100644
--- a/src/pytom_tm/io.py
+++ b/src/pytom_tm/io.py
@@ -6,7 +6,6 @@
 import numpy as np
 from contextlib import contextmanager
 from operator import attrgetter
-from typing import Optional, Union
 class ParseLogging(argparse.Action):
     """argparse.Action subclass to parse logging options from the command line and
     set these to info/debug."""
     def __call__(
-        self, parser, namespace, values: str, option_string: Optional[str] = None
+        self, parser, namespace, values: str, option_string: str | None = None
     ):
         if values.upper() not in ["INFO", "DEBUG"]:
             parser.error(
@@ -34,12 +33,10 @@ def __call__(
         parser,
         namespace,
         values: pathlib.Path,
-        option_string: Optional[str] = None,
+        option_string: str | None = None,
     ):
         if not values.is_dir():
-            parser.error(
-                "{0} got a file path that does not exist ".format(option_string)
-            )
+            parser.error(f"{option_string} got a file path that does not exist ")
         setattr(namespace, self.dest, values)
@@ -52,12 +49,10 @@ def __call__(
         parser,
         namespace,
         values: pathlib.Path,
-        option_string: Optional[str] = None,
+        option_string: str | None = None,
     ):
         if not values.exists():
-            parser.error(
-                "{0} got a file path that does not exist ".format(option_string)
-            )
+            parser.error(f"{option_string} got a file path that does not exist ")
         setattr(namespace, self.dest, values)
@@ -69,11 +64,11 @@ def __call__(
         self,
         parser,
         namespace,
-        values: Union[int, float],
-        option_string: Optional[str] = None,
+        values: int | float,
+        option_string: str | None = None,
     ):
         if values <= 0.0:
-            parser.error("{0} must be larger than 0".format(option_string))
+            parser.error(f"{option_string} must be larger than 0")
         setattr(namespace, self.dest, values)
@@ -83,13 +78,11 @@ class BetweenZeroAndOne(argparse.Action):
     0 and 1."""
     def __call__(
-        self, parser, namespace, values: float, option_string: Optional[str] = None
+        self, parser, namespace, values: float, option_string: str | None = None
     ):
         if 1.0 <= values <= 0.0:
             parser.error(
-                "{0} is a fraction and can only range between 0 and 1".format(
-                    option_string
-                )
+                f"{option_string} is a fraction and can only range between 0 and 1"
             )
         setattr(namespace, self.dest, values)
@@ -105,7 +98,7 @@ def __call__(
         parser,
         namespace,
         values: list[int, int],
-        option_string: Optional[str] = None,
+        option_string: str | None = None,
     ):
         if not (0 <= values[0] < values[1]):
             parser.error(
@@ -126,8 +119,8 @@ def __call__(
         self,
         parser,
         namespace,
-        values: Union[list[str, str], str],
-        option_string: Optional[str] = None,
+        values: list[str, str] | str,
+        option_string: str | None = None,
     ):
         if len(values) == 2:  # two wedge angles provided the min and max
             try:
@@ -147,7 +140,7 @@ def __call__(
                 )
             setattr(namespace, self.dest, read_tlt_file(values))
         else:
-            parser.error("{0} can only take one or two arguments".format(option_string))
+            parser.error(f"{option_string} can only take one or two arguments")
 class ParseGPUIndices(argparse.Action):
@@ -160,7 +153,7 @@ def __call__(
         parser,
         namespace,
         values: list[int, ...],
-        option_string: Optional[str] = None,
+        option_string: str | None = None,
     ):
         import cupy
@@ -180,20 +173,18 @@ class ParseDoseFile(argparse.Action):
     dose per tilt."""
     def __call__(
-        self, parser, namespace, values: str, option_string: Optional[str] = None
+        self, parser, namespace, values: str, option_string: str | None = None
     ):
         file_path = pathlib.Path(values)
         if not file_path.exists():
             parser.error(
-                "{0} provided dose accumulation file does not exist".format(
-                    option_string
-                )
+                f"{option_string} provided dose accumulation file does not exist"
             )
         allowed_suffixes = [".txt"]
         if file_path.suffix not in allowed_suffixes:
             parser.error(
-                "{0} provided dose accumulation file does not have the right suffix, "
-                "allowed are: {1}".format(option_string, ", ".join(allowed_suffixes))
+                f"{option_string} provided dose accumulation file does not have the "
+                f"right suffix, allowed are: {', '.join(allowed_suffixes)}"
             )
         setattr(namespace, self.dest, read_dose_file(file_path))
@@ -203,7 +194,7 @@ class ParseDefocus(argparse.Action):
     to their file format, or a txt file containing per line the defocus of each tilt."""
     def __call__(
-        self, parser, namespace, values: str, option_string: Optional[str] = None
+        self, parser, namespace, values: str, option_string: str | None = None
     ):
         if values.endswith((".defocus", ".txt")):
             file_path = pathlib.Path(values)
@@ -395,7 +386,7 @@ def read_txt_file(file_name: pathlib.Path) -> list[float, ...]:
     output: list[float, ...]
         list of floats
     """
-    with open(file_name, "r") as fstream:
+    with open(file_name) as fstream:
         lines = fstream.readlines()
     return list(map(float, [x.strip() for x in lines if not x.isspace()]))
@@ -450,7 +441,7 @@ def read_imod_defocus_file(file_name: pathlib.Path) -> list[float, ...]:
     output: list[float, ...]
         list of floats with defocus (in μm)
     """
-    with open(file_name, "r") as fstream:
+    with open(file_name) as fstream:
         lines = fstream.readlines()
     imod_defocus_version = float(lines[0].strip().split()[5])
     # imod defocus files have the values specified in nm:
diff --git a/src/pytom_tm/mask.py b/src/pytom_tm/mask.py
index cd82279c..454348c2 100644
--- a/src/pytom_tm/mask.py
+++ b/src/pytom_tm/mask.py
@@ -1,14 +1,13 @@
 import numpy as np
 import numpy.typing as npt
-from typing import Optional
 def spherical_mask(
     box_size: int,
     radius: float,
-    smooth: Optional[float] = None,
+    smooth: float | None = None,
     cutoff_sd: int = 3,
-    center: Optional[float] = None,
+    center: float | None = None,
 ) -> npt.NDArray[float]:
     """Wrapper around ellipsoidal_mask() to create a spherical mask with just a
     single radius.
@@ -42,9 +41,9 @@ def ellipsoidal_mask(
     major: float,
     minor1: float,
     minor2: float,
-    smooth: Optional[float] = None,
+    smooth: float | None = None,
     cutoff_sd: int = 3,
-    center: Optional[float] = None,
+    center: float | None = None,
 ) -> npt.NDArray[float]:
     """Create an ellipsoidal mask in the specified square box. Ellipsoid is defined
     by 3 radius on x,y, and z axis.
diff --git a/src/pytom_tm/matching.py b/src/pytom_tm/matching.py
index f6789735..52c466d9 100644
--- a/src/pytom_tm/matching.py
+++ b/src/pytom_tm/matching.py
@@ -3,7 +3,6 @@
 import numpy.typing as npt
 import voltools as vt
 import gc
-from typing import Optional
 from cupyx.scipy.fft import rfftn, irfftn
 from tqdm import tqdm
 from pytom_tm.correlation import mean_under_mask, std_under_mask
@@ -17,8 +16,8 @@ def __init__(
         template: npt.NDArray[float],
         mask: npt.NDArray[float],
         device_id: int,
-        wedge: Optional[npt.NDArray[float]] = None,
-        phase_randomized_template: Optional[npt.NDArray[float]] = None,
+        wedge: npt.NDArray[float] | None = None,
+        phase_randomized_template: npt.NDArray[float] | None = None,
     ):
         """Initialize a template matching plan. All the necessary cupy arrays
         will be allocated on the GPU.
@@ -126,8 +125,8 @@ def __init__(
         angle_list: list[tuple[float, float, float]],
         angle_ids: list[int],
         mask_is_spherical: bool = True,
-        wedge: Optional[npt.NDArray[float]] = None,
-        stats_roi: Optional[tuple[slice, slice, slice]] = None,
+        wedge: npt.NDArray[float] | None = None,
+        stats_roi: tuple[slice, slice, slice] | None = None,
         noise_correction: bool = False,
         rng_seed: int = 321,
     ):
@@ -220,7 +219,7 @@ def run(self) -> tuple[npt.NDArray[float], npt.NDArray[float], dict]:
             - a dictionary with three floats of search statistics;
               'search_space', 'variance', and 'std'
         """
-        print("Progress job_{} on device {:d}:".format(self.job_id, self.device_id))
+        print(f"Progress job_{self.job_id} on device {self.device_id:d}:")
         # Size x template (sxz) and center x template (cxt)
         sxt, syt, szt = self.plan.template.shape
diff --git a/src/pytom_tm/template.py b/src/pytom_tm/template.py
index dcbef506..f72566a4 100644
--- a/src/pytom_tm/template.py
+++ b/src/pytom_tm/template.py
@@ -4,7 +4,6 @@
 import logging
 from scipy.ndimage import center_of_mass, zoom
 from scipy.fft import rfftn, irfftn
-from typing import Optional
 from pytom_tm.weights import (
     create_gaussian_low_pass,
     radial_reduced_grid,
@@ -16,8 +15,8 @@ def generate_template_from_map(
     input_spacing: float,
     output_spacing: float,
     center: bool = False,
-    filter_to_resolution: Optional[float] = None,
-    output_box_size: Optional[int] = None,
+    filter_to_resolution: float | None = None,
+    output_box_size: int | None = None,
 ) -> npt.NDArray[float]:
     """Generate a template from a density map.
diff --git a/src/pytom_tm/tmjob.py b/src/pytom_tm/tmjob.py
index 3d51fc7c..d34cba73 100644
--- a/src/pytom_tm/tmjob.py
+++ b/src/pytom_tm/tmjob.py
@@ -8,7 +8,6 @@
 import numpy.typing as npt
 import json
 import logging
-from typing import Optional, Union
 from scipy.fft import next_fast_len, rfftn, irfftn
 from pytom_tm.angles import get_angle_list
 from pytom_tm.matching import TemplateMatchingGPU
@@ -41,7 +40,7 @@ def load_json_to_tmjob(
     job: TMJob
         initialized TMJob
     """
-    with open(file_name, "r") as fstream:
+    with open(file_name) as fstream:
         data = json.load(fstream)
     # wrangle dtypes
@@ -264,24 +263,24 @@ def __init__(
         template: pathlib.Path,
         mask: pathlib.Path,
         output_dir: pathlib.Path,
-        angle_increment: Optional[Union[str, float]] = None,
+        angle_increment: str | float | None = None,
         mask_is_spherical: bool = True,
-        tilt_angles: Optional[list[float, ...]] = None,
+        tilt_angles: list[float, ...] | None = None,
         tilt_weighting: bool = False,
-        search_x: Optional[list[int, int]] = None,
-        search_y: Optional[list[int, int]] = None,
-        search_z: Optional[list[int, int]] = None,
-        tomogram_mask: Optional[pathlib.Path] = None,
-        voxel_size: Optional[float] = None,
-        low_pass: Optional[float] = None,
-        high_pass: Optional[float] = None,
-        dose_accumulation: Optional[list[float, ...]] = None,
-        ctf_data: Optional[list[dict, ...]] = None,
+        search_x: list[int, int] | None = None,
+        search_y: list[int, int] | None = None,
+        search_z: list[int, int] | None = None,
+        tomogram_mask: pathlib.Path | None = None,
+        voxel_size: float | None = None,
+        low_pass: float | None = None,
+        high_pass: float | None = None,
+        dose_accumulation: list[float, ...] | None = None,
+        ctf_data: list[dict, ...] | None = None,
         whiten_spectrum: bool = False,
         rotational_symmetry: int = 1,
         pytom_tm_version_number: str = PYTOM_TM_VERSION,
         job_loaded_for_extraction: bool = False,
-        particle_diameter: Optional[float] = None,
+        particle_diameter: float | None = None,
         random_phase_correction: bool = False,
         rng_seed: int = 321,
         defocus_handedness: int = 0,
@@ -725,7 +724,7 @@ def split_volume_search(self, split: tuple[int, int, int]) -> list[TMJob, ...]:
         return self.sub_jobs
     def merge_sub_jobs(
-        self, stats: Optional[list[dict, ...]] = None
+        self, stats: list[dict, ...] | None = None
     ) -> tuple[npt.NDArray[float], npt.NDArray[float]]:
         """Merge the sub jobs present in self.sub_jobs together to create the final
         output score and angle maps.
@@ -811,7 +810,7 @@ def merge_sub_jobs(
     def start_job(
         self, gpu_id: int, return_volumes: bool = False
-    ) -> Union[tuple[npt.NDArray[float], npt.NDArray[float]], dict]:
+    ) -> tuple[npt.NDArray[float], npt.NDArray[float]] | dict:
         """Run this template matching job on the specified GPU. Search statistics of
         the job will always be assigned to the self.job_stats.
diff --git a/src/pytom_tm/utils.py b/src/pytom_tm/utils.py
index 5e842f84..6f1dbf5f 100644
--- a/src/pytom_tm/utils.py
+++ b/src/pytom_tm/utils.py
@@ -2,7 +2,7 @@
 import sys
-class mute_stdout_stderr(object):
+class mute_stdout_stderr:
     """Context manager to redirect stdout and stderr to devnull.
     Only used to prevent terminal flooding in unittests."""
diff --git a/src/pytom_tm/weights.py b/src/pytom_tm/weights.py
index 9aeb96c6..a438b0d5 100644
--- a/src/pytom_tm/weights.py
+++ b/src/pytom_tm/weights.py
@@ -2,7 +2,6 @@
 import numpy.typing as npt
 import scipy.ndimage as ndimage
 import voltools as vt
-from typing import Optional, Union
 from pytom_tm.io import UnequalSpacingError
 from itertools import pairwise
@@ -83,7 +82,7 @@ def wavelength_ev2m(voltage: float) -> float:
 def radial_reduced_grid(
-    shape: Union[tuple[int, int, int], tuple[int, int]], shape_is_reduced: bool = False
+    shape: tuple[int, int, int] | tuple[int, int], shape_is_reduced: bool = False
 ) -> npt.NDArray[float]:
     """Calculates a Fourier space radial reduced grid for the given input shape,
     with the 0 frequency in the center of the output image. Values range from 0 in the
@@ -143,7 +142,7 @@ def radial_reduced_grid(
 def create_gaussian_low_pass(
-    shape: Union[tuple[int, int, int], tuple[int, int]],
+    shape: tuple[int, int, int] | tuple[int, int],
     spacing: float,
     resolution: float,
 ) -> npt.NDArray[float]:
@@ -174,7 +173,7 @@ def create_gaussian_high_pass(
-    shape: Union[tuple[int, int, int], tuple[int, int]],
+    shape: tuple[int, int, int] | tuple[int, int],
     spacing: float,
     resolution: float,
 ) -> npt.NDArray[float]:
@@ -205,10 +204,10 @@ def create_gaussian_band_pass(
-    shape: Union[tuple[int, int, int], tuple[int, int]],
+    shape: tuple[int, int, int] | tuple[int, int],
     spacing: float,
-    low_pass: Optional[float] = None,
-    high_pass: Optional[float] = None,
+    low_pass: float | None = None,
+    high_pass: float | None = None,
 ) -> npt.NDArray[float]:
     """Resolution bands presents the resolution shells where information needs to
     be maintained. For example the bands might be (150A, 40A). For a spacing of 15A
@@ -263,11 +262,11 @@ def create_wedge(
     voxel_size: float,
     cut_off_radius: float = 1.0,
     angles_in_degrees: bool = True,
-    low_pass: Optional[float] = None,
-    high_pass: Optional[float] = None,
+    low_pass: float | None = None,
+    high_pass: float | None = None,
     tilt_weighting: bool = False,
-    accumulated_dose_per_tilt: Optional[list[float, ...]] = None,
-    ctf_params_per_tilt: Optional[list[dict]] = None,
+    accumulated_dose_per_tilt: list[float, ...] | None = None,
+    ctf_params_per_tilt: list[dict] | None = None,
 ) -> npt.NDArray[float]:
     """This function returns a wedge volume that is either symmetric or asymmetric
     depending on wedge angle input.
@@ -485,8 +484,8 @@ def _create_tilt_weighted_wedge(
     tilt_angles: list[float, ...],
     cut_off_radius: float,
     pixel_size_angstrom: float,
-    accumulated_dose_per_tilt: Optional[list[float, ...]] = None,
-    ctf_params_per_tilt: Optional[list[dict]] = None,
+    accumulated_dose_per_tilt: list[float, ...] | None = None,
+    ctf_params_per_tilt: list[dict] | None = None,
 ) -> npt.NDArray[float]:
     """
     The following B-factor heuristic is used (as mentioned in the M paper, and
@@ -634,7 +633,7 @@ def _create_tilt_weighted_wedge(
 def create_ctf(
-    shape: Union[tuple[int, int, int], tuple[int, int]],
+    shape: tuple[int, int, int] | tuple[int, int],
     pixel_size: float,
     defocus: float,
     amplitude_contrast: float,
@@ -791,7 +790,7 @@ def power_spectrum_profile(image: npt.NDArray[float]) -> npt.NDArray[float]:
 def profile_to_weighting(
-    profile: npt.NDArray[float], shape: Union[tuple[int, int], tuple[int, int, int]]
+    profile: npt.NDArray[float], shape: tuple[int, int] | tuple[int, int, int]
 ) -> npt.NDArray[float]:
     """Calculate a radial weighing (filter) from a spectrum profile.