diff --git a/pyproject.toml b/pyproject.toml
index c00908024..4291211e5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -114,6 +114,7 @@ select = [
     "NPY", # numpy specific rules
     "PL", # pylint
     "SIM", # flake-8-simplify
+    "UP", # pyupgrade
     "W", # pycodestyle
 ]
 ignore = [
diff --git a/src/semeio/_docs_utils/_json_schema_2_rst.py b/src/semeio/_docs_utils/_json_schema_2_rst.py
index 7e4a40623..ab548eccf 100644
--- a/src/semeio/_docs_utils/_json_schema_2_rst.py
+++ b/src/semeio/_docs_utils/_json_schema_2_rst.py
@@ -1,5 +1,4 @@
 from copy import deepcopy
-from typing import List, Optional, Union


 def _insert_ref(schema: dict, defs: dict) -> dict:
@@ -66,8 +65,8 @@ def _create_docs(schema: dict) -> str:


 def _make_documentation(
-    schema: Union[list, dict, str],
-    required: Optional[List[str]] = None,
+    schema: list | dict | str,
+    required: list[str] | None = None,
     level: int = 0,
     preface: str = "",
     element_seperator: str = "\n\n",
diff --git a/src/semeio/fmudesign/_excel2dict.py b/src/semeio/fmudesign/_excel2dict.py
index 9a3ac53b4..1555ae685 100644
--- a/src/semeio/fmudesign/_excel2dict.py
+++ b/src/semeio/fmudesign/_excel2dict.py
@@ -83,9 +83,7 @@ def _find_geninput_sheetname(input_filename):

     if len(general_input_sheet) > 1:
         raise ValueError(
-            "More than one sheet with general input. Sheetnames are {} ".format(
-                general_input_sheet
-            )
+            f"More than one sheet with general input. Sheetnames are {general_input_sheet} "
         )

     if not general_input_sheet:
@@ -120,9 +118,7 @@ def _find_onebyone_defaults_sheet(input_filename):
             default_values_sheet.append(sheet)
     if len(default_values_sheet) > 1:
         raise ValueError(
-            "More than one sheet with default values. Sheetnames are {} ".format(
-                default_values_sheet
-            )
+            f"More than one sheet with default values. Sheetnames are {default_values_sheet} "
         )

     if len(default_values_sheet) == []:
@@ -175,9 +171,9 @@ def _check_designinput(dsgn_input):
         if _has_value(row.sensname):
             if row.sensname in sensitivity_names:
                 raise ValueError(
-                    "sensname '{}' was found on more than one row in designinput "
+                    f"sensname '{row.sensname}' was found on more than one row in designinput "
                     "sheet. Two sensitivities can not share the same sensname. "
-                    "Please correct this and rerun".format(row.sensname)
+                    "Please correct this and rerun"
                 )
             sensitivity_names.append(row.sensname)

@@ -189,10 +185,10 @@ def _check_for_mixed_sensitivities(sens_name, sens_group):
     types = sens_group.groupby("type", sort=False)
     if len(types) > 1:
         raise ValueError(
-            "The sensitivity with sensname '{}' in designinput sheet contains more "
+            f"The sensitivity with sensname '{sens_name}' in designinput sheet contains more "
             "than one sensitivity type. For each sensname all parameters must be "
             "specified using the same type (seed, scenario, dist, ref, background, "
-            "extern)".format(sens_name)
+            "extern)"
         )


@@ -393,11 +389,9 @@ def _read_defaultvalues(filename, sheetname):
     for row in default_df.itertuples():
         if str(row[0]) in default_dict:
             print(
-                "WARNING: The default value '{}' "
-                "is listed twice in the sheet '{}'. "
-                "Only the first entry will be used in output file".format(
-                    row[0], sheetname
-                )
+                f"WARNING: The default value '{row[0]}' "
+                f"is listed twice in the sheet '{sheetname}'. "
+                "Only the first entry will be used in output file"
             )
         else:
             default_dict[str(row[0])] = row[1]
@@ -432,9 +426,9 @@ def _read_dependencies(filename, sheetname, from_parameter):
             depend_dict["to_params"][key] = depend_df[key].tolist()
     else:
         raise ValueError(
-            "Parameter {} specified to have derived parameters, "
-            "but the sheet specifying the dependencies {} does "
-            "not contain the input parameter. ".format(from_parameter, sheetname)
+            f"Parameter {from_parameter} specified to have derived parameters, "
+            f"but the sheet specifying the dependencies {sheetname} does "
+            "not contain the input parameter. "
         )
     return depend_dict

@@ -479,25 +473,25 @@ def _read_background(inp_filename, bck_sheet):
             )
         if not _has_value(row.dist_param1):
             raise ValueError(
-                "Parameter {} has been input "
+                f"Parameter {row.param_name} has been input "
                 "in background sheet but with empty "
-                "first distribution parameter ".format(row.param_name)
+                "first distribution parameter "
             )
         if not _has_value(row.dist_param2) and _has_value(row.dist_param3):
             raise ValueError(
-                "Parameter {} has been input in "
+                f"Parameter {row.param_name} has been input in "
                 "background sheet with "
                 'value for "dist_param3" while '
                 '"dist_param2" is empty. This is not '
-                "allowed".format(row.param_name)
+                "allowed"
             )
         if not _has_value(row.dist_param3) and _has_value(row.dist_param4):
             raise ValueError(
-                "Parameter {} has been input in "
+                f"Parameter {row.param_name} has been input in "
                 "background sheet with "
                 'value for "dist_param4" while '
                 '"dist_param3" is empty. This is not '
-                "allowed".format(row.param_name)
+                "allowed"
             )
         distparams = [
             item
@@ -545,15 +539,15 @@ def _read_scenario_sensitivity(sensgroup):
     for row in sensgroup.itertuples():
         if not _has_value(row.param_name):
             raise ValueError(
-                "Scenario sensitivity {} specified "
+                f"Scenario sensitivity {row.sensname} specified "
                 "where one line has empty parameter "
-                "name ".format(row.sensname)
+                "name "
             )
         if not _has_value(row.value1):
             raise ValueError(
-                "Parameter {} har been input "
+                f"Parameter {row.param_name} har been input "
                 'as type "scenario" but with empty '
-                "value in value1 column ".format(row.param_name)
+                "value in value1 column "
             )
         casedict1[str(row.param_name)] = row.value1

@@ -596,9 +590,9 @@ def _read_constants(sensgroup):
     for row in sensgroup.itertuples():
         if not _has_value(row.dist_param1):
             raise ValueError(
-                "Parameter name {} has been input "
+                f"Parameter name {row.param_name} has been input "
                 'in a sensitivity of type "seed". \n'
-                "If {} was meant to be the name of "
+                f"If {row.param_name} was meant to be the name of "
                 "the seed parameter, this is "
                 "unfortunately not allowed. "
                 "The seed parameter name is standardised "
@@ -606,9 +600,7 @@ def _read_constants(sensgroup):
                 "If you instead meant to specify a constant "
                 "value for another parameter in the seed "
                 'sensitivity, please remember "const" in '
-                'dist_name and a value in "dist_param1". '.format(
-                    row.param_name, row.param_name
-                )
+                'dist_name and a value in "dist_param1". '
             )
         distparams = row.dist_param1
         paramdict[str(row.param_name)] = [str(row.dist_name), distparams]
@@ -631,29 +623,29 @@ def _read_dist_sensitivity(sensgroup):
     for row in sensgroup.itertuples():
         if not _has_value(row.param_name):
             raise ValueError(
-                "Dist sensitivity {} specified "
+                f"Dist sensitivity {row.sensname} specified "
                 "where one line has empty parameter "
-                "name ".format(row.sensname)
+                "name "
             )
         if not _has_value(row.dist_param1):
             raise ValueError(
-                "Parameter {} has been input "
+                f"Parameter {row.param_name} has been input "
                 'as type "dist" but with empty '
-                "first distribution parameter ".format(row.param_name)
+                "first distribution parameter "
             )
         if not _has_value(row.dist_param2) and _has_value(row.dist_param3):
             raise ValueError(
-                "Parameter {} has been input with "
+                f"Parameter {row.param_name} has been input with "
                 'value for "dist_param3" while '
                 '"dist_param2" is empty. This is not '
-                "allowed".format(row.param_name)
+                "allowed"
             )
         if not _has_value(row.dist_param3) and _has_value(row.dist_param4):
             raise ValueError(
-                "Parameter {} has been input with "
+                f"Parameter {row.param_name} has been input with "
                 'value for "dist_param4" while '
                 '"dist_param3" is empty. This is not '
-                "allowed".format(row.param_name)
+                "allowed"
             )
         distparams = [
             item
diff --git a/src/semeio/fmudesign/_tornado_onebyone.py b/src/semeio/fmudesign/_tornado_onebyone.py
index c06a996de..fdc6c9237 100755
--- a/src/semeio/fmudesign/_tornado_onebyone.py
+++ b/src/semeio/fmudesign/_tornado_onebyone.py
@@ -188,7 +188,7 @@ def calc_tornadoinput(
                 avg1 = 0
                 print(
                     "Warning: Number of ok realizations is 0 in"
-                    "sensitivity {} case1".format(sensname)
+                    f"sensitivity {sensname} case1"
                 )

        if designsummary.loc[sensno]["senstype"] == "mc":
@@ -227,7 +227,7 @@ def calc_tornadoinput(
                 avg2 = 0
                 print(
                     "Warning: Number of ok realizations is 0 in"
-                    "sensitivity {} case2".format(sensname)
+                    f"sensitivity {sensname} case2"
                 )
             subset2name = designsummary.loc[sensno]["casename2"]
        else:
diff --git a/src/semeio/fmudesign/create_design.py b/src/semeio/fmudesign/create_design.py
index fa50b6f11..740486cbe 100644
--- a/src/semeio/fmudesign/create_design.py
+++ b/src/semeio/fmudesign/create_design.py
@@ -13,9 +13,11 @@
 # are irrelevant and would only confuse users. We suppress them by redirecting
 # stdout/stderr during import.
 # https://github.com/cvxpy/cvxpy/issues/2470
-with open(os.devnull, "w") as devnull, contextlib.redirect_stdout(
-    devnull
-), contextlib.redirect_stderr(devnull):
+with (
+    open(os.devnull, "w") as devnull,
+    contextlib.redirect_stdout(devnull),
+    contextlib.redirect_stderr(devnull),
+):
     import cvxpy as cp

 import numpy as np
@@ -330,7 +332,7 @@ def to_xlsx(
             print(
                 "Warning: Output filename did not have extension .xlsx "
                 "but the export format is Excel .xlsx . "
-                "Changing outputname to {}".format(filename)
+                f"Changing outputname to {filename}"
             )

         xlsxwriter = pd.ExcelWriter(filename, engine="openpyxl")
@@ -383,7 +385,7 @@ def add_seeds(self, seeds, max_reals):
                     "Valid choices for seeds are None, "
                     '"default" or an existing filename. '
                     "Neither was found in this case. seeds "
-                    "had been specified as {} .".format(seeds)
+                    f"had been specified as {seeds} ."
                 )

     def add_background(
@@ -446,20 +448,16 @@ def _fill_with_background_values(self):
                if len(temp_df) > len(self.backgroundvalues):
                    raise ValueError(
                        "Provided number of background values "
-                        "{} is smaller than number"
-                        " of realisations for sensitivity {}".format(
-                            len(self.backgroundvalues), sensname
-                        )
+                        f"{len(self.backgroundvalues)} is smaller than number"
+                        f" of realisations for sensitivity {sensname}"
                    )
            elif len(temp_df) > len(self.backgroundvalues):
                print(
                    "Provided number of background values "
-                    "({}) is smaller than number"
-                    " of realisations for sensitivity {}"
-                    " and parameter {}. "
-                    "Will be filled with default values.".format(
-                        len(self.backgroundvalues), sensname, key
-                    )
+                    f"({len(self.backgroundvalues)}) is smaller than number"
+                    f" of realisations for sensitivity {sensname}"
+                    f" and parameter {key}. "
+                    "Will be filled with default values."
                )
                existing_values = result_values.copy()
                result_values = pd.concat([existing_values, temp_df])
@@ -490,13 +488,13 @@ def _fill_derived_params(self, depend_dict):
                 )
                 if self.designvalues[param].isnull().any():
                     raise ValueError(
-                        "Column for derived parameter {} "
+                        f"Column for derived parameter {param} "
                         "contains NaN. Check input "
                         "defining dependencies. "
                         "Could be Wrong values or that "
                         "values for input variable in "
                         "dependencies sheet "
-                        "should be specified as strings.".format(param)
+                        "should be specified as strings."
                     )

     def _add_dist_background(
@@ -603,7 +601,7 @@ def generate(self, realnums, seedname, seedvalues, parameters):
                raise ValueError(
                    'A sensitivity of type "seed" can only have '
                    "additional parameters where dist_name is "
-                    '"const". Check sensitivity {}"'.format(self.sensname)
+                    f'"const". Check sensitivity {self.sensname}"'
                )
            self.sensvalues[key] = constant

@@ -967,9 +965,9 @@ def generate(self, realnums, filename, parameters, seedvalues):
         extern_values = _parameters_from_extern(filename)
         if len(realnums) > len(extern_values):
             raise ValueError(
-                "Number of realisations {} specified for "
-                "sensitivity {} is larger than rows in "
-                "file {}".format(len(realnums), self.sensname, filename)
+                f"Number of realisations {len(realnums)} specified for "
+                f"sensitivity {self.sensname} is larger than rows in "
+                f"file {filename}"
             )
         for param in parameters:
             if param in extern_values:
@@ -1064,7 +1062,7 @@ def _printwarning(corr_group_name):
         "Using designinput sheets where "
         "corr_sheet is only specified for one parameter "
         "will cause non-correlated parameters .\n"
-        "ONLY ONE PARAMETER WAS SPECIFIED TO USE CORR_SHEET {}\n"
+        f"ONLY ONE PARAMETER WAS SPECIFIED TO USE CORR_SHEET {corr_group_name}\n"
         "\n"
         "Note change in how correlated parameters are specified \n"
         "from fmudeisgn version 1.0.1 in August 2019 :\n"
@@ -1079,5 +1077,4 @@ def _printwarning(corr_group_name):
         "one-by-one-sensitivities\n"
         "\n"
         "####################################################\n"
-        "".format(corr_group_name)
     )
diff --git a/src/semeio/fmudesign/design_distributions.py b/src/semeio/fmudesign/design_distributions.py
index da9e2feee..ae64a2c81 100644
--- a/src/semeio/fmudesign/design_distributions.py
+++ b/src/semeio/fmudesign/design_distributions.py
@@ -4,7 +4,6 @@

 import re
 from pathlib import Path
-from typing import Union

 import numpy as np
 import pandas as pd
@@ -299,7 +298,7 @@ def draw_values_triangular(dist_parameters, numreals, rng, normalscoresamples=No
     if high == low: # collapsed distribution
         print(
             "Low and high parameters for triangular distribution"
-            " are equal. Using constant {}".format(low)
+            f" are equal. Using constant {low}"
         )
         if normalscoresamples is not None:
             values = scipy.stats.uniform.ppf(
@@ -355,7 +354,7 @@ def draw_values_pert(dist_parameters, numreals, rng, normalscoresamples=None):
     if high == low: # collapsed distribution
         print(
             "Low and high parameters for pert distribution"
-            " are equal. Using constant {}".format(low)
+            f" are equal. Using constant {low}"
         )
         if normalscoresamples is not None:
             values = scipy.stats.uniform.ppf(
@@ -515,9 +514,7 @@ def is_number(teststring):
             return False


-def read_correlations(
-    excel_filename: Union[str, Path], corr_sheet: str
-) -> pd.DataFrame:
+def read_correlations(excel_filename: str | Path, corr_sheet: str) -> pd.DataFrame:
     """Reading correlation info for a monte carlo sensitivity

diff --git a/src/semeio/fmudesign/fmudesignrunner.py b/src/semeio/fmudesign/fmudesignrunner.py
index 25c0b6185..e0bc30e22 100644
--- a/src/semeio/fmudesign/fmudesignrunner.py
+++ b/src/semeio/fmudesign/fmudesignrunner.py
@@ -78,14 +78,14 @@ def main():

     if isinstance(args.config, str):
         if not Path(args.config).is_file():
-            raise IOError(f"Input file {args.config} does not exist")
+            raise OSError(f"Input file {args.config} does not exist")
     input_dict = excel2dict_design(args.config, sheetnames)

     if args.config == args.destination:
-        raise IOError(
-            'Identical name "{}" have been provided for the input'
+        raise OSError(
+            f'Identical name "{args.config}" have been provided for the input'
             "file and the output file. "
-            " Exiting.....".format(args.config)
+            " Exiting....."
         )

     design = DesignMatrix()
diff --git a/src/semeio/forward_models/__init__.py b/src/semeio/forward_models/__init__.py
index 0486c1f4b..2ad92ad60 100644
--- a/src/semeio/forward_models/__init__.py
+++ b/src/semeio/forward_models/__init__.py
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from ert import (
     ForwardModelStepDocumentation,
     ForwardModelStepJSON,
@@ -34,7 +32,7 @@ def validate_pre_experiment(self, fm_step_json: ForwardModelStepJSON) -> None:
         return fm_step_json

     @staticmethod
-    def documentation() -> Optional[ForwardModelStepDocumentation]:
+    def documentation() -> ForwardModelStepDocumentation | None:
         return ForwardModelStepDocumentation(
             category="utility.templating",
             source_package="semeio",
@@ -73,7 +71,7 @@ def validate_pre_experiment(self, fm_step_json: ForwardModelStepJSON) -> None:
         return fm_step_json

     @staticmethod
-    def documentation() -> Optional[ForwardModelStepDocumentation]:
+    def documentation() -> ForwardModelStepDocumentation | None:
         return ForwardModelStepDocumentation(
             category="utility.templating",
             source_package="semeio",
@@ -112,7 +110,7 @@ def __init__(self):
         )

     @staticmethod
-    def documentation() -> Optional[ForwardModelStepDocumentation]:
+    def documentation() -> ForwardModelStepDocumentation | None:
         return ForwardModelStepDocumentation(
             category="utility.transformation",
             source_package="semeio",
@@ -167,7 +165,7 @@ def __init__(self):
         )

     @staticmethod
-    def documentation() -> Optional[ForwardModelStepDocumentation]:
+    def documentation() -> ForwardModelStepDocumentation | None:
         return ForwardModelStepDocumentation(
             category="modelling.surface",
             source_package="semeio",
@@ -204,7 +202,7 @@ def __init__(self):
         )

     @staticmethod
-    def documentation() -> Optional[ForwardModelStepDocumentation]:
+    def documentation() -> ForwardModelStepDocumentation | None:
         return ForwardModelStepDocumentation(
             category="modelling.reservoir",
             source_package="semeio",
@@ -235,7 +233,7 @@ def __init__(self):
         )

     @staticmethod
-    def documentation() -> Optional[ForwardModelStepDocumentation]:
+    def documentation() -> ForwardModelStepDocumentation | None:
         return ForwardModelStepDocumentation(
             category="utility.file_system",
             source_package="semeio",
@@ -254,7 +252,7 @@ def __init__(self):
         )

     @staticmethod
-    def documentation() -> Optional[ForwardModelStepDocumentation]:
+    def documentation() -> ForwardModelStepDocumentation | None:
         return ForwardModelStepDocumentation(
             category="utility.file_system",
             source_package="semeio",
@@ -274,7 +272,7 @@ def __init__(self):
         )

     @staticmethod
-    def documentation() -> Optional[ForwardModelStepDocumentation]:
+    def documentation() -> ForwardModelStepDocumentation | None:
         return ForwardModelStepDocumentation(
             category="utility.file_system",
             source_package="semeio",
diff --git a/src/semeio/forward_models/design2params/design2params.py b/src/semeio/forward_models/design2params/design2params.py
index 5add438d3..c02d145c6 100644
--- a/src/semeio/forward_models/design2params/design2params.py
+++ b/src/semeio/forward_models/design2params/design2params.py
@@ -252,11 +252,11 @@ def _invalid_design_realizations(design_matrix):

     :raises: SystemExit if some parameter names are not allowed
     """
-    empty_cell_coords = list(zip(*np.where(pd.isna(design_matrix))))
+    empty_cell_coords = list(zip(*np.where(pd.isna(design_matrix)), strict=False))

     empties = [
         f"Realization {i}, column {design_matrix.columns[j]}"
-        for i, j in zip(*np.where(pd.isna(design_matrix)))
+        for i, j in zip(*np.where(pd.isna(design_matrix)), strict=False)
     ]
     if len(empties) > 0:
         logger.warning(f"Design matrix contains empty cells {empties}")
diff --git a/src/semeio/forward_models/design_kw/design_kw.py b/src/semeio/forward_models/design_kw/design_kw.py
index 060306b2a..bb369a98e 100644
--- a/src/semeio/forward_models/design_kw/design_kw.py
+++ b/src/semeio/forward_models/design_kw/design_kw.py
@@ -1,7 +1,7 @@
 import logging
 import re
 import shlex
-from typing import Dict, List, Mapping
+from collections.abc import Mapping

 from ert import ForwardModelStepWarning

@@ -58,8 +58,8 @@ def run(


 def find_matching_errors(
-    line: str, template_file_name: str, template: List[str]
-) -> List[str]:
+    line: str, template_file_name: str, template: list[str]
+) -> list[str]:
     errors = []
     for unmatched in unmatched_templates(line):
         if is_perl(template_file_name, template):
@@ -83,7 +83,7 @@ def is_perl(file_name, template):
     return file_name.endswith(".pl") or template[0].find("perl") != -1


-def is_xml(file_name: str, template: List[str]) -> bool:
+def is_xml(file_name: str, template: list[str]) -> bool:
     return file_name.endswith(".xml") or template[0].find("?xml") != -1


@@ -100,7 +100,7 @@ def is_comment(line):
     return ecl_comment_pattern.search(line) or std_comment_pattern.search(line)


-def extract_key_value(parameters: List[str]) -> Dict[str, str]:
+def extract_key_value(parameters: list[str]) -> dict[str, str]:
     """Parses a list of strings, looking for key-value pairs pr. line
     separated by whitespace, into a dictionary.

diff --git a/src/semeio/forward_models/overburden_timeshift/ots.py b/src/semeio/forward_models/overburden_timeshift/ots.py
index 0fb85621b..a8c8ae1a8 100644
--- a/src/semeio/forward_models/overburden_timeshift/ots.py
+++ b/src/semeio/forward_models/overburden_timeshift/ots.py
@@ -432,7 +432,10 @@ def _vintages_name_date(vintage_pairs):
             vintages_name.append(f"S{i}")

         Vintage = namedtuple("Vintages", "name date")
-        return [Vintage(name, date) for name, date in zip(vintages_name, vintages_date)]
+        return [
+            Vintage(name, date)
+            for name, date in zip(vintages_name, vintages_date, strict=False)
+        ]

     def _report(self, func_name, base, monitor, num_points_calculated):
         if self._convention == 1:
diff --git a/src/semeio/forward_models/overburden_timeshift/ots_config.py b/src/semeio/forward_models/overburden_timeshift/ots_config.py
index e9800fdbc..65e716475 100644
--- a/src/semeio/forward_models/overburden_timeshift/ots_config.py
+++ b/src/semeio/forward_models/overburden_timeshift/ots_config.py
@@ -1,13 +1,12 @@
 from datetime import date
 from pathlib import Path
-from typing import TYPE_CHECKING, List, Literal, Optional
+from typing import TYPE_CHECKING, Annotated, Literal, Self

 from pydantic import BaseModel, Field, conlist, field_validator, model_validator
 from resdata.resfile import ResdataFile
-from typing_extensions import Annotated, Self

 if TYPE_CHECKING:
-    ConstrainedList = List[date]
+    ConstrainedList = list[date]
 else:
     ConstrainedList = conlist(date, min_length=2, max_length=2)

@@ -23,7 +22,7 @@ class Vintages(BaseModel):
     """

     ts_simple: Annotated[
-        List[ConstrainedList],
+        list[ConstrainedList],
         Field(
             description=(
                 "Simple TimeShift geertsma algorithm. "
@@ -32,17 +31,17 @@ class Vintages(BaseModel):
         ),
     ] = []
     ts: Annotated[
-        List[ConstrainedList],
+        list[ConstrainedList],
         Field(
             description="TimeShift geertsma algorithm, which uses velocity, very slow"
         ),
     ] = []
     ts_rporv: Annotated[
-        List[ConstrainedList],
+        list[ConstrainedList],
         Field(description="Delta pressure multiplied by cell volume, relatively fast"),
     ] = []
     dpv: Annotated[
-        List[ConstrainedList],
+        list[ConstrainedList],
         Field(
             description=(
                 "Calculates timeshift without using velocity. The velocity is only "
@@ -127,7 +126,7 @@ class OTSConfig(BaseModel):
         ),
     ]
     horizon: Annotated[
-        Optional[str],
+        str | None,
         Field(
             None,
             description=(
@@ -138,7 +137,7 @@ class OTSConfig(BaseModel):
     ]
     eclbase: Annotated[str, Field(description="Path to the Eclipse case")]
     vintages_export_file: Annotated[
-        Optional[str],
+        str | None,
         Field(
             None,
             description="Path to resulting text file, which contains all computed "
@@ -146,7 +145,7 @@ class OTSConfig(BaseModel):
         ),
     ]
     velocity_model: Annotated[
-        Optional[str],
+        str | None,
         Field(None, description="Path to segy file containing the velocity field"),
     ]
     mapaxes: Annotated[
diff --git a/src/semeio/forward_models/rft/utility.py b/src/semeio/forward_models/rft/utility.py
index dc88a9357..0aa350685 100644
--- a/src/semeio/forward_models/rft/utility.py
+++ b/src/semeio/forward_models/rft/utility.py
@@ -3,7 +3,6 @@
 import os
 import warnings
 from pathlib import Path
-from typing import List, Tuple

 from resdata.grid import Grid
 from resdata.rft import ResdataRFTFile
@@ -23,7 +22,7 @@ def existing_directory(path):

 def load_and_parse_well_time_file(
     filename: str,
-) -> List[Tuple[str, datetime.date, int]]:
+) -> list[tuple[str, datetime.date, int]]:
     """
     Reads and parses a file from disk, supporting 2 formats:

diff --git a/src/semeio/hook_implementations/forward_models.py b/src/semeio/hook_implementations/forward_models.py
index 3d0a2bc32..6d96c1e8d 100644
--- a/src/semeio/hook_implementations/forward_models.py
+++ b/src/semeio/hook_implementations/forward_models.py
@@ -1,5 +1,4 @@
 import importlib
-from typing import Dict

 import ert
 import importlib_resources
@@ -22,7 +21,7 @@ def _remove_suffix(string: str, suffix: str) -> str:
     return string[: -len(suffix)]


-def _get_forward_models_from_directory(directory: str) -> Dict[str, str]:
+def _get_forward_models_from_directory(directory: str) -> dict[str, str]:
     resource_directory_ref = importlib_resources.files("semeio") / directory

     all_files = []
diff --git a/tests/communication/unit/test_semeio_script.py b/tests/communication/unit/test_semeio_script.py
index d59f95586..fc0b42333 100644
--- a/tests/communication/unit/test_semeio_script.py
+++ b/tests/communication/unit/test_semeio_script.py
@@ -90,7 +90,7 @@ def assert_log(messages, log_file):
         log_data = file.readlines()
         assert len(messages) == len(log_data)

-        for msg, log_entry in zip(messages, log_data):
+        for msg, log_entry in zip(messages, log_data, strict=False):
             assert msg in log_entry


diff --git a/tests/fmudesign/test_excel2dict.py b/tests/fmudesign/test_excel2dict.py
index 798de3167..42c3a4247 100644
--- a/tests/fmudesign/test_excel2dict.py
+++ b/tests/fmudesign/test_excel2dict.py
@@ -75,7 +75,7 @@ def test_excel2dict_design(tmpdir, monkeypatch):
     # Dump to yaml:
     inputdict_to_yaml(dict_design, "dictdesign.yaml")
     assert os.path.exists("dictdesign.yaml")
-    with open("dictdesign.yaml", "r", encoding="utf-8") as inputfile:
+    with open("dictdesign.yaml", encoding="utf-8") as inputfile:
         assert "RMS_SEED" in inputfile.read()


diff --git a/tests/forward_models/rft/test_gendata_rft.py b/tests/forward_models/rft/test_gendata_rft.py
index bbc5e7549..005acdd4b 100644
--- a/tests/forward_models/rft/test_gendata_rft.py
+++ b/tests/forward_models/rft/test_gendata_rft.py
@@ -532,8 +532,7 @@ def test_ert_setup_one_well_one_rft_point(tmpdir):
     # (assert on the return code further down)
     result = subprocess.run(
         ["ert", "ensemble_smoother", "--target-case", "default_%d", "config.ert"],
-        stdout=subprocess.PIPE,
-        stderr=subprocess.PIPE,
+        capture_output=True,
         check=False,
     )
     stdouterr = result.stdout.decode() + result.stderr.decode()
@@ -658,8 +657,7 @@ def test_ert_setup_one_well_two_points_different_time_and_depth(tmpdir):
     # (assert on the return code further down)
     result = subprocess.run(
         ["ert", "ensemble_smoother", "--target-case", "default_%d", "config.ert"],
-        stdout=subprocess.PIPE,
-        stderr=subprocess.PIPE,
+        capture_output=True,
         check=False,
     )
     stdouterr = result.stdout.decode() + result.stderr.decode()
@@ -701,7 +699,7 @@ def _assert_almost_equal_line_by_line(file1, file2):

     assert len(file1_content) == len(file2_content)

-    for line1, line2 in zip(file1_content, file2_content):
+    for line1, line2 in zip(file1_content, file2_content, strict=False):
         try:
             line1, line2 = float(line1), float(line2)
         except ValueError:
diff --git a/tests/forward_models/rft/test_trajectory.py b/tests/forward_models/rft/test_trajectory.py
index 6f534fef7..82fab22f8 100644
--- a/tests/forward_models/rft/test_trajectory.py
+++ b/tests/forward_models/rft/test_trajectory.py
@@ -100,7 +100,7 @@ def test_load(fname):
     trajectory = Trajectory.load_from_file(fname)

     for expected_utmx, trajectorypoint in zip(
-        expected_utmxs, trajectory.trajectory_points
+        expected_utmxs, trajectory.trajectory_points, strict=False
     ):
         assert trajectorypoint.utm_x == expected_utmx

diff --git a/tests/forward_models/rft/test_well_and_time.py b/tests/forward_models/rft/test_well_and_time.py
index 4aade4c55..4334d8791 100644
--- a/tests/forward_models/rft/test_well_and_time.py
+++ b/tests/forward_models/rft/test_well_and_time.py
@@ -58,7 +58,7 @@ def test_load():
     well_times = load_and_parse_well_time_file(fname)

     for (exp_wname, exp_wtime, exp_report), (wname, wtime, report) in zip(
-        expected_results, well_times
+        expected_results, well_times, strict=False
     ):
         assert wname == exp_wname
         assert wtime == exp_wtime
@@ -89,7 +89,7 @@ def test_invalid_load():
         "non_existing",
     ]

-    for fname, error in zip(fnames, errors):
+    for fname, error in zip(fnames, errors, strict=False):
         with pytest.raises(argparse.ArgumentTypeError) as msgcontext:
             load_and_parse_well_time_file(fname)
         assert error in msgcontext.value.args[0]
diff --git a/tests/forward_models/rft/test_zonemap.py b/tests/forward_models/rft/test_zonemap.py
index eacb7e668..45875e762 100644
--- a/tests/forward_models/rft/test_zonemap.py
+++ b/tests/forward_models/rft/test_zonemap.py
@@ -88,7 +88,7 @@ def test_invalid_load():
         "non_existing",
     ]

-    for fname, error in zip(fnames, errors):
+    for fname, error in zip(fnames, errors, strict=False):
         with pytest.raises(argparse.ArgumentTypeError) as msgcontext:
             ZoneMap.load_and_parse_zonemap_file(fname)
         assert error in msgcontext.value.args[0]
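
Reviewer note, not part of the patch above: the hunks are mechanical modernisations driven by the new ruff "UP" (pyupgrade) rule plus a few related cleanups. The standalone sketch below uses hypothetical names and simply collects the patterns the diff applies, for anyone unfamiliar with them: built-in generics and PEP 604 unions instead of typing.List/Dict/Optional/Union, f-strings instead of str.format, an explicit strict flag for zip (Python 3.10+), OSError instead of the IOError alias, and subprocess capture_output=True instead of separate stdout/stderr pipes.

# Illustrative sketch only -- hypothetical names, not code from this repository.
import subprocess


def summarise(values: list[str] | None = None) -> dict[str, int]:
    # list[...]/dict[...] and "| None" replace typing.List/Dict/Optional (PEP 585/604).
    values = values or []
    return {value: len(value) for value in values}


def pair_up(names: list[str], dates: list[str]) -> list[tuple[str, str]]:
    # Python 3.10 added the strict flag to zip(); strict=False keeps the old
    # silently-truncating behaviour, which is what the diff opts into.
    return list(zip(names, dates, strict=False))


def run_quietly(cmd: list[str]) -> str:
    # capture_output=True is shorthand for stdout=subprocess.PIPE, stderr=subprocess.PIPE.
    result = subprocess.run(cmd, capture_output=True, check=False)
    return result.stdout.decode() + result.stderr.decode()


if __name__ == "__main__":
    counts = summarise(["seed", "scenario"])
    print(f"counts: {counts}")  # f-string instead of "counts: {}".format(counts)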