Skip to content

Commit

Permalink
Merge branch 'main' into headwater
Browse files Browse the repository at this point in the history
  • Loading branch information
DirkEilander committed Mar 12, 2024
2 parents 7b27254 + 04819fd commit 224f85c
Show file tree
Hide file tree
Showing 5 changed files with 26 additions and 12 deletions.
2 changes: 1 addition & 1 deletion hydromt_sfincs/regulargrid.py
Original file line number Diff line number Diff line change
Expand Up @@ -458,7 +458,7 @@ def create_index_tiles(
Format of index tiles, either "bin" (binary, default) or "png"
"""

index_path = os.path.join(root, "index")
index_path = os.path.join(root, "indices")
npix = 256

# for binary format, use .dat extension
Expand Down
22 changes: 13 additions & 9 deletions hydromt_sfincs/sfincs.py
Original file line number Diff line number Diff line change
Expand Up @@ -239,8 +239,10 @@ def setup_grid_from_region(
* {'bbox': [xmin, ymin, xmax, ymax]}
* {'geom': 'path/to/polygon_geometry'}
Note: For the 'bbox' option the coordinates need to be provided in WGS84/EPSG:4326.
For a complete overview of all region options,
see :py:function:~hydromt.workflows.basin_mask.parse_region
see :py:func:`hydromt.workflows.basin_mask.parse_region`
res : float, optional
grid resolution, by default 100 m
crs : Union[str, int], optional
Expand Down Expand Up @@ -331,7 +333,7 @@ def setup_dep(
datasets_dep : List[dict]
List of dictionaries with topobathy data, each containing a dataset name or Path (elevtn) and optional merge arguments e.g.:
[{'elevtn': merit_hydro, 'zmin': 0.01}, {'elevtn': gebco, 'offset': 0, 'merge_method': 'first', 'reproj_method': 'bilinear'}]
For a complete overview of all merge options, see :py:function:~hydromt.workflows.merge_multi_dataarrays
For a complete overview of all merge options, see :py:func:`hydromt.workflows.merge_multi_dataarrays`
buffer_cells : int, optional
Number of cells between datasets to ensure smooth transition of bed levels, by default 0
interp_method : str, optional
Expand Down Expand Up @@ -641,7 +643,7 @@ def setup_subgrid(
or xarray raster object ('elevtn').
Optional merge arguments include: 'zmin', 'zmax', 'mask', 'offset', 'reproj_method',
and 'merge_method', see example below. For a complete overview of all merge options,
see :py:function:~hydromt.workflows.merge_multi_dataarrays
see :py:func:`hydromt.workflows.merge_multi_dataarrays`
::
Expand Down Expand Up @@ -682,7 +684,7 @@ def setup_subgrid(
segment_length [m] (default 500m) and riv_bank_q [0-1] (default 0.5)
which are used to estimate the river bank height in case river depth is provided.
For more info see :py:function:~hydromt.workflows.bathymetry.burn_river_rect
For more info see :py:func:`hydromt.workflows.bathymetry.burn_river_rect`
::
Expand Down Expand Up @@ -1782,7 +1784,7 @@ def set_forcing_1d(
gdf_locs = gdf_locs.set_index(col)
self.logger.info(f"Setting gdf_locs index to {col}")
break
if not (gdf_locs.index) == set(df_ts.columns):
if not set(gdf_locs.index) == set(df_ts.columns):
gdf_locs = gdf_locs.set_index(df_ts.columns)
self.logger.info(
f"No matching index column found in gdf_locs; assuming the order is correct"
Expand Down Expand Up @@ -2262,7 +2264,7 @@ def setup_precip_forcing(self, timeseries=None, magnitude=None):
Parameters
----------
timeseries, str, Path
timeseries: str, Path
Path to tabulated timeseries csv file with time index in first column
and location IDs in the first row,
see :py:meth:`hydromt.open_timeseries_from_table` for details.
Expand Down Expand Up @@ -2487,7 +2489,7 @@ def setup_tiles(
datasets_dep : List[dict]
List of dictionaries with topobathy data, each containing a dataset name or Path (elevtn) and optional merge arguments e.g.:
[{'elevtn': merit_hydro, 'zmin': 0.01}, {'elevtn': gebco, 'offset': 0, 'merge_method': 'first', 'reproj_method': 'bilinear'}]
For a complete overview of all merge options, see :py:function:~hydromt.workflows.merge_multi_dataarrays
For a complete overview of all merge options, see :py:func:`~hydromt.workflows.merge_multi_dataarrays`
Note that subgrid/dep_subgrid.tif is automatically used if present and datasets_dep is left empty.
zoom_range : Union[int, List[int]], optional
Range of zoom levels for which tiles are created, by default [0,13]
Expand Down Expand Up @@ -2562,7 +2564,7 @@ def setup_tiles(
root=path,
region=region,
datasets_dep=datasets_dep,
index_path=os.path.join(path, "index"),
index_path=os.path.join(path, "indices"),
zoom_range=zoom_range,
z_range=z_range,
fmt=fmt,
Expand Down Expand Up @@ -2928,6 +2930,8 @@ def read_geoms(self):
gdf = utils.read_drn(fn, crs=self.crs)
else:
gdf = utils.read_xy(fn, crs=self.crs)
# this seems to be required for new pandas versions
gdf.set_geometry("geometry", inplace=True)
self.set_geoms(gdf, name=gname)
# read additional geojson files from gis directory
for fn in glob.glob(join(self.root, "gis", "*.geojson")):
Expand Down Expand Up @@ -3630,7 +3634,7 @@ def _parse_datasets_rgh(self, datasets_rgh):
reclass_table = join(DATADIR, "lulc", f"{lulc}_mapping.csv")
if reclass_table is None:
raise IOError(
f"Manning roughness mapping file not found: {reclass_table}"
f"Manning roughness 'reclass_table' csv file must be provided"
)
da_lulc = self.data_catalog.get_rasterdataset(
lulc,
Expand Down
2 changes: 2 additions & 0 deletions hydromt_sfincs/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -496,6 +496,7 @@ def linestring2gdf(feats: List[Dict], crs: Union[int, CRS] = None) -> gpd.GeoDat
feat.update({"geometry": LineString(list(zip(*xyz)))})
records.append(feat)
gdf = gpd.GeoDataFrame.from_records(records)
gdf.set_geometry("geometry", inplace=True)
if crs is not None:
gdf.set_crs(crs, inplace=True)
return gdf
Expand Down Expand Up @@ -530,6 +531,7 @@ def polygon2gdf(
gdf = gpd.GeoDataFrame.from_records(records)
gdf["zmin"] = zmin
gdf["zmax"] = zmax
gdf.set_geometry("geometry", inplace=True)
if crs is not None:
gdf.set_crs(crs, inplace=True)
return gdf
Expand Down
6 changes: 5 additions & 1 deletion hydromt_sfincs/workflows/tiling.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""Tiling functions for fast visualization of the SFINCS model in- and output data."""
import logging
import math
import os
from itertools import product
Expand All @@ -16,6 +17,8 @@

__all__ = ["create_topobathy_tiles", "downscale_floodmap_webmercator"]

logger = logging.getLogger(__name__)


def downscale_floodmap_webmercator(
zsmax: Union[np.array, xr.DataArray],
Expand Down Expand Up @@ -133,6 +136,7 @@ def create_topobathy_tiles(
zoom_range: Union[int, List[int]] = [0, 13],
z_range: List[int] = [-20000.0, 20000.0],
fmt="bin",
logger=logger,
):
"""Create webmercator topobathy tiles for a given region.
Expand Down Expand Up @@ -189,7 +193,7 @@ def create_topobathy_tiles(
maxx, maxy = map(min, zip(transformer.transform(maxx, maxy), [20037508.34] * 2))

for izoom in range(zoom_range[0], zoom_range[1] + 1):
print("Processing zoom level " + str(izoom))
logger.debug("Processing zoom level " + str(izoom))

zoom_path = os.path.join(topobathy_path, str(izoom))

Expand Down
6 changes: 5 additions & 1 deletion tests/test_1model_class.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,10 +107,14 @@ def test_subgrid_rivers(mod):
gdf_riv = mod.data_catalog.get_geodataframe(
"rivers_lin2019_v1", geom=mod.region, buffer=1e3
)

# create dummy depths for the river based on the width
rivdph = gdf_riv["rivwth"].values / 100
rivdph[-1] = np.nan
gdf_riv["rivdph"] = rivdph

# set the depth of the river with "COMID": 21002062 to nan
gdf_riv.loc[gdf_riv["COMID"] == 21002062, "rivdph"] = np.nan

sbg_org = mod.subgrid.copy()

mod.setup_subgrid(
Expand Down

0 comments on commit 224f85c

Please sign in to comment.