Issue #1083 duplicate wells (#1086)
Fixes #1083 

# Description
When importing wells, we now make sure that when the IPF file defines two or more wells with the same x, y, filter_top, filter_bottom, and id, the ID of the second well is appended with "_1", and so on for successive occurrences of a well with the same characteristics (_2, _3, etc.).
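
For illustration, here is a minimal, self-contained sketch of the renaming scheme in isolation. The helper `deduplicate_well_ids` and its tuple layout are hypothetical; the merged change lives in `_read_package_ipf`, see the diff below.

```python
def deduplicate_well_ids(wells):
    """Append _1, _2, ... to the id of any well whose
    (x, y, id, filter_top, filter_bottom) tuple was seen before."""
    seen = {}  # characteristics tuple -> number of duplicates so far
    renamed = []
    for x, y, well_id, filt_top, filt_bot in wells:
        key = (x, y, well_id, filt_top, filt_bot)
        if key not in seen:
            seen[key] = 0
        else:
            seen[key] += 1
            well_id = f"{well_id}_{seen[key]}"
        renamed.append((x, y, well_id, filt_top, filt_bot))
    return renamed


# Two identical wells and one at a different x: only the duplicate is renamed.
wells = [(0.0, 0.0, "w", 10.0, 5.0)] * 2 + [(1.0, 0.0, "w", 10.0, 5.0)]
print([w[2] for w in deduplicate_well_ids(wells)])  # ['w', 'w_1', 'w']
```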

# Checklist

- [X] Links to correct issue
- [ ] Update changelog, if changes affect users
- [X] PR title starts with ``Issue #nr``, e.g. ``Issue #737``
- [X] Unit tests were added
- [ ] **If feature added**: Added/extended example
luitjansl authored Jun 26, 2024
1 parent 90b245d commit 9f274bd
Showing 2 changed files with 89 additions and 0 deletions.
36 changes: 36 additions & 0 deletions imod/formats/prj/prj.py
@@ -3,6 +3,7 @@
"""

import shlex
import textwrap
from collections import defaultdict
from datetime import datetime
from itertools import chain
@@ -15,6 +16,8 @@
import xarray as xr

import imod
import imod.logging
from imod.logging.loglevel import LogLevel

FilePath = Union[str, "PathLike[str]"]

@@ -784,6 +787,11 @@ def _read_package_ipf(
) -> Tuple[List[Dict[str, Any]], List[datetime]]:
    out = []
    repeats = []

    # Map each (x, y, id, filter_top, filter_bottom) tuple, which should be
    # unique per well, to the number of duplicate occurrences seen so far.
    imported_wells = {}

    for entry in block_content["ipf"]:
        timestring = entry["time"]
        layer = entry["layer"]
@@ -822,6 +830,34 @@
            if "filt_top" in row._fields and "filt_bot" in row._fields:
                df_assoc["filt_top"] = row.filt_top
                df_assoc["filt_bot"] = row.filt_bot

                well_characteristics = (
                    row[1],
                    row[2],
                    path_assoc.stem,
                    row.filt_top,
                    row.filt_bot,
                )
                if well_characteristics not in imported_wells:
                    imported_wells[well_characteristics] = 0
                else:
                    suffix = imported_wells[well_characteristics] + 1
                    imported_wells[well_characteristics] = suffix
                    df_assoc["id"] = df_assoc["id"] + f"_{suffix}"

                    log_message = textwrap.dedent(
                        f"""A well with the same x, y, id, filter_top and filter_bot was already imported.
                        This happened at x = {row[1]}, y = {row[2]}, id = {path_assoc.stem}.
                        The ID of this new well was therefore appended with the suffix _{suffix}.
                        """
                    )

                    imod.logging.logger.log(
                        loglevel=LogLevel.WARNING,
                        message=log_message,
                        additional_depth=2,
                    )

            dfs.append(df_assoc)
        df = pd.concat(dfs, ignore_index=True, sort=False)
        df["rate"] = df["rate"] * factor + addition
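The warning above is emitted through imod's logging framework, which stays silent unless the user configures it. Below is a minimal usage sketch for making the message visible during an import, based on the calls used in the test further down; `"my_model.prj"` is a placeholder path.

```python
import imod
from imod.formats.prj import open_projectfile_data
from imod.logging.config import LoggerType
from imod.logging.loglevel import LogLevel

# Route imod's log records through the standard Python logger and show
# WARNING-level messages on the default stream handler.
imod.logging.configure(
    LoggerType.PYTHON,
    log_level=LogLevel.WARNING,
    add_default_file_handler=False,
    add_default_stream_handler=True,
)

# Any duplicate wells found during the import are now reported as warnings.
data = open_projectfile_data("my_model.prj")  # placeholder path
```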
53 changes: 53 additions & 0 deletions imod/tests/test_mf6/test_import_prj.py
@@ -1,11 +1,15 @@
import sys
from textwrap import dedent
from zipfile import ZipFile

import numpy as np
from numpy.testing import assert_allclose

import imod
from imod.data.sample_data import create_pooch_registry, load_pooch_registry
from imod.formats.prj import open_projectfile_data
from imod.logging.config import LoggerType
from imod.logging.loglevel import LogLevel

registry = create_pooch_registry()
registry = load_pooch_registry(registry)
@@ -200,6 +204,55 @@ def test_import_ipf(tmp_path):
assert np.all(result_snippet_1[0]["wel-3"]["dataframe"]["filt_bot"] == 6.0)


def test_import_ipf_unique_id_and_logging(tmp_path):
with ZipFile(fname_model) as archive:
archive.extractall(tmp_path)

logfile_path = tmp_path / "logfile.txt"

try:
with open(logfile_path, "w") as sys.stdout:
# start logging
imod.logging.configure(
LoggerType.PYTHON,
log_level=LogLevel.WARNING,
add_default_file_handler=False,
add_default_stream_handler=True,
)
projects_file = tmp_path / "iMOD5_model_pooch" / "iMOD5_model.prj"

file1 = open(projects_file, "w")
file1.write(
snippet_gen_import_ipf(
factor1=2.0, addition1=1.3, factor2=-1.0, addition2=0.0
)
)
file1.close()

# Act
result_snippet_1 = open_projectfile_data(projects_file)
finally:
# turn the logger off again
imod.logging.configure(
LoggerType.NULL,
log_level=LogLevel.WARNING,
add_default_file_handler=False,
add_default_stream_handler=False,
)

# test that id's were made unique
# Assert
assert np.all(result_snippet_1[0]["wel-1"]["dataframe"]["id"] == "extractions")
assert np.all(result_snippet_1[0]["wel-2"]["dataframe"]["id"] == "extractions_1")
assert np.all(result_snippet_1[0]["wel-3"]["dataframe"]["id"] == "extractions_2")

with open(logfile_path, "r") as log_file:
log = log_file.read()
assert "This happened at x = 197910, y = 362860, id = extractions" in log
assert "appended with the suffix _1" in log
assert "appended with the suffix _2" in log


def snippet_boundary_condition(factor: float, addition: float):
    return dedent(f"""\
        0001,(CHD),1, Constant Head
