From 63c431796d730cb9ee4d3a4b47ac7ca226493569 Mon Sep 17 00:00:00 2001 From: Lukas Riedel <34276446+peanutfun@users.noreply.github.com> Date: Tue, 4 Jul 2023 13:36:54 +0200 Subject: [PATCH 1/8] Remove all uses of Tag from climada/engine --- climada/engine/impact.py | 114 ++++++----------------------- climada/engine/impact_data.py | 9 --- climada/engine/test/test_impact.py | 40 +--------- 3 files changed, 22 insertions(+), 141 deletions(-) diff --git a/climada/engine/impact.py b/climada/engine/impact.py index 84840c613..39c97767d 100644 --- a/climada/engine/impact.py +++ b/climada/engine/impact.py @@ -51,7 +51,6 @@ import climada.util.dates_times as u_dt import climada.util.plot as u_plot from climada.util.select import get_attributes_with_matching_dimension -from climada.util.tag import Tag LOGGER = logging.getLogger(__name__) @@ -88,9 +87,6 @@ class Impact(): imp_mat : sparse.csr_matrix matrix num_events x num_exp with impacts. only filled if save_mat is True in calc() - tag : dict - dictionary of tags of exposures, impact functions set and - hazard: {'exp': Tag(), 'impf_set': Tag(), 'haz': Tag()} haz_type : str the hazard type of the hazard """ @@ -109,7 +105,6 @@ def __init__(self, aai_agg=0, unit='', imp_mat=None, - tag=None, haz_type=''): """ Init Impact object @@ -145,15 +140,11 @@ def __init__(self, value unit used (given by exposures unit) imp_mat : sparse.csr_matrix, optional matrix num_events x num_exp with impacts. - tag : dict, optional - dictionary of tags of exposures, impact functions set and - hazard: {'exp': Tag(), 'impf_set': Tag(), 'haz': Tag()} haz_type : str, optional the hazard type """ self.haz_type = haz_type - self.tag = tag or {} self.event_id = np.array([], int) if event_id is None else event_id self.event_name = [] if event_name is None else event_name self.date = np.array([], int) if date is None else date @@ -260,10 +251,6 @@ def from_eih(cls, exposures, impfset, hazard, at_event = at_event, aai_agg = aai_agg, imp_mat = imp_mat if imp_mat is not None else sparse.csr_matrix((0, 0)), - tag = {'exp': exposures.tag, - 'impf_set': impfset.tag, - 'haz': hazard.tag - }, haz_type = hazard.haz_type, ) @@ -543,7 +530,6 @@ def calc_freq_curve(self, return_per=None): ifc_impact = interp_imp return ImpactFreqCurve( - tag=self.tag, return_per=ifc_return_per, impact=ifc_impact, unit=self.unit, @@ -885,14 +871,10 @@ def write_csv(self, file_name): LOGGER.info('Writing %s', file_name) with open(file_name, "w", encoding='utf-8') as imp_file: imp_wr = csv.writer(imp_file) - imp_wr.writerow(["tag_hazard", "tag_exposure", "tag_impact_func", - "unit", "tot_value", "aai_agg", "event_id", + imp_wr.writerow(["haz_type", "unit", "tot_value", "aai_agg", "event_id", "event_name", "event_date", "event_frequency", "frequency_unit", "at_event", "eai_exp", "exp_lat", "exp_lon", "exp_crs"]) - csv_data = [[[self.haz_type], [self.tag['haz'].file_name], - [self.tag['haz'].description]], - [[self.tag['exp'].file_name], [self.tag['exp'].description]], - [[self.tag['impf_set'].file_name], [self.tag['impf_set'].description]], + csv_data = [[self.haz_type], [self.unit], [self._tot_value], [self.aai_agg], self.event_id, self.event_name, self.date, self.frequency, [self.frequency_unit], self.at_event, @@ -920,32 +902,26 @@ def write_col(i_col, imp_ws, xls_data): imp_wb = xlsxwriter.Workbook(file_name) imp_ws = imp_wb.add_worksheet() - header = ["tag_hazard", "tag_exposure", "tag_impact_func", - "unit", "tot_value", "aai_agg", "event_id", + header = ["haz_type", "unit", "tot_value", "aai_agg", 
"event_id", "event_name", "event_date", "event_frequency", "frequency_unit", "at_event", "eai_exp", "exp_lat", "exp_lon", "exp_crs"] for icol, head_dat in enumerate(header): imp_ws.write(0, icol, head_dat) - data = [str(self.haz_type), str(self.tag['haz'].file_name), - str(self.tag['haz'].description)] + data = [str(self.haz_type)] write_col(0, imp_ws, data) - data = [str(self.tag['exp'].file_name), str(self.tag['exp'].description)] - write_col(1, imp_ws, data) - data = [str(self.tag['impf_set'].file_name), str(self.tag['impf_set'].description)] - write_col(2, imp_ws, data) - write_col(3, imp_ws, [self.unit]) - write_col(4, imp_ws, [self._tot_value]) - write_col(5, imp_ws, [self.aai_agg]) - write_col(6, imp_ws, self.event_id) - write_col(7, imp_ws, self.event_name) - write_col(8, imp_ws, self.date) - write_col(9, imp_ws, self.frequency) - write_col(10, imp_ws, [self.frequency_unit]) - write_col(11, imp_ws, self.at_event) - write_col(12, imp_ws, self.eai_exp) - write_col(13, imp_ws, self.coord_exp[:, 0]) - write_col(14, imp_ws, self.coord_exp[:, 1]) - write_col(15, imp_ws, [str(self.crs)]) + write_col(1, imp_ws, [self.unit]) + write_col(2, imp_ws, [self._tot_value]) + write_col(3, imp_ws, [self.aai_agg]) + write_col(4, imp_ws, self.event_id) + write_col(5, imp_ws, self.event_name) + write_col(6, imp_ws, self.date) + write_col(7, imp_ws, self.frequency) + write_col(8, imp_ws, [self.frequency_unit]) + write_col(9, imp_ws, self.at_event) + write_col(10, imp_ws, self.eai_exp) + write_col(11, imp_ws, self.coord_exp[:, 0]) + write_col(12, imp_ws, self.coord_exp[:, 1]) + write_col(13, imp_ws, [str(self.crs)]) imp_wb.close() @@ -1023,11 +999,6 @@ def write_dict(group, name, value): for key, val in value.items(): write(group, key, val) - def write_tag(group, name, value): - """Write a tag object using the dict writer""" - group = group.create_group(name) # name is 'exp', 'haz', 'impf_set' - value.to_hdf5(group) # value is a Tag - def _write_csr_dense(group, name, value): """Write a CSR Matrix in dense format""" group.create_dataset(name, data=value.toarray()) @@ -1052,7 +1023,6 @@ def write_csr(group, name, value): # 2) Anything is 'object', so this serves as fallback/default. 
type_writers = { str: write_attribute, - Tag: write_tag, dict: write_dict, sparse.csr_matrix: write_csr, Collection: write_dataset, @@ -1107,7 +1077,7 @@ def from_csv(cls, file_name): # pylint: disable=no-member LOGGER.info('Reading %s', file_name) imp_df = pd.read_csv(file_name) - imp = cls(haz_type=str(imp_df.tag_hazard[0])) + imp = cls(haz_type=imp_df.haz_type[0]) imp.unit = imp_df.unit[0] imp.tot_value = imp_df.tot_value[0] imp.aai_agg = imp_df.aai_agg[0] @@ -1128,12 +1098,7 @@ def from_csv(cls, file_name): imp.crs = u_coord.to_crs_user_input(imp_df.exp_crs.values[0]) except AttributeError: imp.crs = DEF_CRS - imp.tag['haz'] = Tag(str(imp_df.tag_hazard[1]), - str(imp_df.tag_hazard[2])) - imp.tag['exp'] = Tag(str(imp_df.tag_exposure[0]), - str(imp_df.tag_exposure[1])) - imp.tag['impf_set'] = Tag(str(imp_df.tag_impact_func[0]), - str(imp_df.tag_impact_func[1])) + return imp def read_csv(self, *args, **kwargs): @@ -1158,16 +1123,7 @@ def from_excel(cls, file_name): """ LOGGER.info('Reading %s', file_name) dfr = pd.read_excel(file_name) - imp = cls(haz_type=str(dfr['tag_hazard'][0])) - imp.tag['haz'] = Tag( - file_name = dfr['tag_hazard'][1], - description = dfr['tag_hazard'][2]) - imp.tag['exp'] = Tag() - imp.tag['exp'].file_name = dfr['tag_exposure'][0] - imp.tag['exp'].description = dfr['tag_exposure'][1] - imp.tag['impf_set'] = Tag() - imp.tag['impf_set'].file_name = dfr['tag_impact_func'][0] - imp.tag['impf_set'].description = dfr['tag_impact_func'][1] + imp = cls(haz_type=str(dfr['haz_type'][0])) imp.unit = dfr.unit[0] imp.tot_value = dfr.tot_value[0] @@ -1215,20 +1171,6 @@ def from_hdf5(cls, file_path: Union[str, Path]): ├─ event_name ├─ frequency ├─ imp_mat - ├─ tag/ - │ ├─ exp/ - │ │ ├─ .attrs/ - │ │ │ ├─ file_name - │ │ │ ├─ description - │ ├─ haz/ - │ │ ├─ .attrs/ - │ │ │ ├─ haz_type - │ │ │ ├─ file_name - │ │ │ ├─ description - │ ├─ impf_set/ - │ │ ├─ .attrs/ - │ │ │ ├─ file_name - │ │ │ ├─ description ├─ .attrs/ │ ├─ aai_agg │ ├─ crs @@ -1300,13 +1242,6 @@ def from_hdf5(cls, file_path: Union[str, Path]): # pylint: disable=no-member kwargs["event_name"] = list(file["event_name"].asstr()[:]) - # Tags - if "tag" in file: - tag_group = file["tag"] - # the tag group has tags for 'exp', 'haz' and 'impf_set' - tag_kwargs = {tag: Tag.from_hdf5(tag_group[tag]) for tag in tag_group.keys()} - - kwargs["tag"] = tag_kwargs # Create the impact object return cls(**kwargs) @@ -1476,7 +1411,6 @@ def _build_exp(self): crs=self.crs, value_unit=self.unit, ref_year=0, - tag=Tag(), meta=None ) @@ -1498,7 +1432,6 @@ def _build_exp_event(self, event_id): crs=self.crs, value_unit=self.unit, ref_year=0, - tag=Tag(), meta=None ) @@ -1719,7 +1652,7 @@ def concat(cls, imp_list: Iterable, reset_event_ids: bool = False): ``frequency``, ``imp_mat``, ``at_event``, - sums up the values of attributes ``eai_exp``, ``aai_exp`` - and takes the following attributes from the first impact object in the passed - impact list: ``coord_exp``, ``crs``, ``unit``, ``tot_value``, ``tag``, + impact list: ``coord_exp``, ``crs``, ``unit``, ``tot_value``, ``frequency_unit`` If event ids are not unique among the passed impact objects an error is raised. 
@@ -1806,7 +1739,6 @@ def stack_attribute(attr_name: str) -> np.ndarray: eai_exp=np.nansum([imp.eai_exp for imp in imp_list], axis=0), aai_agg=np.nansum([imp.aai_agg for imp in imp_list]), imp_mat=imp_mat, - tag=first_imp.tag, haz_type=first_imp.haz_type, frequency_unit=first_imp.frequency_unit, **kwargs, @@ -1850,10 +1782,6 @@ class ImpactFreqCurve(): """Impact exceedence frequency curve. """ - tag : dict = field(default_factory=dict) - """dictionary of tags of exposures, impact functions set and - hazard: {'exp': Tag(), 'impf_set': Tag(), 'haz': Tag()}""" - return_per : np.array = np.array([]) """return period""" diff --git a/climada/engine/impact_data.py b/climada/engine/impact_data.py index 2b5bc8410..f8cde2838 100644 --- a/climada/engine/impact_data.py +++ b/climada/engine/impact_data.py @@ -30,7 +30,6 @@ from climada.util.constants import DEF_CRS import climada.util.coordinates as u_coord from climada.engine import Impact -from climada.util.tag import Tag LOGGER = logging.getLogger(__name__) @@ -956,14 +955,6 @@ def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr # Inititate Impact-instance: impact_instance = Impact(haz_type=hazard_type_climada) - impact_instance.tag = dict() - impact_instance.tag['haz'] = Tag(file_name=emdat_file_csv, - description='EM-DAT impact, direct import') - impact_instance.tag['exp'] = Tag(file_name=emdat_file_csv, - description='EM-DAT impact, direct import') - impact_instance.tag['impf_set'] = Tag(file_name=None, description=None) - - # Load EM-DAT impact data by event: em_data = emdat_impact_event(emdat_file_csv, countries=countries, hazard=hazard_type_emdat, year_range=year_range, reference_year=reference_year, diff --git a/climada/engine/test/test_impact.py b/climada/engine/test/test_impact.py index 70b870fe2..4db50d6c4 100644 --- a/climada/engine/test/test_impact.py +++ b/climada/engine/test/test_impact.py @@ -28,7 +28,6 @@ from pyproj import CRS from rasterio.crs import CRS as rCRS -from climada.util.tag import Tag from climada.entity.entity_def import Entity from climada.hazard.base import Hazard from climada.engine import Impact, ImpactCalc @@ -65,11 +64,6 @@ def dummy_impact(): imp_mat=sparse.csr_matrix( np.array([[0, 0], [1, 1], [2, 2], [3, 3], [30, 30], [31, 31]]) ), - tag={ - "exp": Tag("file_exp.p", "descr exp"), - "haz": Tag("file_haz.p", "descr haz"), - "impf_set": Tag(), - }, haz_type="TC", ) @@ -370,9 +364,6 @@ def test_write_read_ev_test(self): num_ev = 10 num_exp = 5 imp_write = Impact(haz_type='TC') - imp_write.tag = {'exp': Tag('file_exp.p', 'descr exp'), - 'haz': Tag('file_haz.p', 'descr haz'), - 'impf_set': Tag()} imp_write.event_id = np.arange(num_ev) imp_write.event_name = ['event_' + str(num) for num in imp_write.event_id] imp_write.date = np.ones(num_ev) @@ -410,9 +401,6 @@ def test_write_read_exp_test(self): num_ev = 5 num_exp = 10 imp_write = Impact(haz_type='TC') - imp_write.tag = {'exp': Tag('file_exp.p', 'descr exp'), - 'haz': Tag('file_haz.p', 'descr haz'), - 'impf_set': Tag()} imp_write.event_id = np.arange(num_ev) imp_write.event_name = ['event_' + str(num) for num in imp_write.event_id] imp_write.date = np.ones(num_ev) @@ -950,11 +938,6 @@ def _compare_file_to_imp(self, filepath, impact, dense_imp_mat): self.assertEqual(file.attrs["aai_agg"], impact.aai_agg) self.assertEqual(file.attrs["frequency_unit"], impact.frequency_unit) - for tagtype in ["exp", "haz", "impf_set"]: - self.assertDictEqual( - Tag.from_hdf5(file["tag"][tagtype]).__dict__, impact.tag[tagtype].__dict__ - ) - if dense_imp_mat: 
npt.assert_array_equal(file["imp_mat"], impact.imp_mat.toarray()) else: @@ -972,11 +955,7 @@ def _compare_impacts(self, impact_1, impact_2): for name, value in impact_1.__dict__.items(): self.assertIn(name, impact_2.__dict__) value_comp = getattr(impact_2, name) - # NOTE: Tags do not compare - if name == "tag": - for key in value: - self.assertDictEqual(value[key].__dict__, value_comp[key].__dict__) - elif isinstance(value, sparse.csr_matrix): + if isinstance(value, sparse.csr_matrix): npt.assert_array_equal(value.toarray(), value_comp.toarray()) elif np.ndim(value) > 0: npt.assert_array_equal(value, value_comp) @@ -1035,7 +1014,6 @@ def test_read_hdf5_minimal(self): self.assertEqual(impact.tot_value, 0) self.assertEqual(impact.aai_agg, 0) self.assertEqual(impact.unit, "") - self.assertEqual(impact.tag, {}) self.assertEqual(impact.haz_type, "") def test_read_hdf5_full(self): @@ -1055,14 +1033,6 @@ def test_read_hdf5_full(self): aai_agg = 200 unit = "unit" haz_type="haz_type" - haz_tag = dict(file_name=["file_name"], description=["description"]) - exp_tag = dict(file_name=["exp"], description=["exp"]) - impf_set_tag = dict(file_name=["impf_set"], description=["impf_set"]) - - def write_tag(group, tag_kwds): - for key, value in tag_kwds.items(): - array = group.create_dataset(key, (1,0), STR_DT) - array[0] = value # Write the data with h5py.File(self.filepath, "w") as file: @@ -1081,11 +1051,6 @@ def write_tag(group, tag_kwds): file.attrs["tot_value"] = tot_value file.attrs["aai_agg"] = aai_agg file.attrs["unit"] = unit - for group, kwds in zip( - ("haz", "exp", "impf_set"), (haz_tag, exp_tag, impf_set_tag) - ): - taghdf5 = file.create_group(f"tag/{group}") - Tag(**kwds).to_hdf5(taghdf5) file.attrs["haz_type"] = haz_type # Load and check @@ -1104,9 +1069,6 @@ def write_tag(group, tag_kwds): self.assertEqual(impact.tot_value, tot_value) self.assertEqual(impact.aai_agg, aai_agg) self.assertEqual(impact.unit, unit) - self.assertEqual(impact.tag["haz"].__dict__, haz_tag) - self.assertEqual(impact.tag["exp"].__dict__, exp_tag) - self.assertEqual(impact.tag["impf_set"].__dict__, impf_set_tag) self.assertEqual(impact.haz_type, haz_type) # Check with sparse From 8f2c707e02f1e0f417ca0b7d6d967192fffb6b96 Mon Sep 17 00:00:00 2001 From: Lukas Riedel <34276446+peanutfun@users.noreply.github.com> Date: Tue, 4 Jul 2023 13:47:56 +0200 Subject: [PATCH 2/8] Remove tag usage from yearsets and measures tests --- climada/entity/measures/test/test_base.py | 6 ------ climada/util/yearsets.py | 2 -- 2 files changed, 8 deletions(-) diff --git a/climada/entity/measures/test/test_base.py b/climada/entity/measures/test/test_base.py index c20df313a..7b6d7b399 100644 --- a/climada/entity/measures/test/test_base.py +++ b/climada/entity/measures/test/test_base.py @@ -397,9 +397,6 @@ def test_calc_impact_pass(self): self.assertTrue(np.array_equal(imp.event_id, hazard.event_id)) self.assertTrue(np.array_equal(imp.date, hazard.date)) self.assertEqual(imp.event_name, hazard.event_name) - self.assertEqual(imp.tag['exp'].file_name, entity.exposures.tag.file_name) - self.assertEqual(imp.tag['haz'].file_name, hazard.tag.file_name) - self.assertEqual(imp.tag['impf_set'].file_name, entity.impact_funcs.tag.file_name) self.assertEqual(risk_transf.aai_agg, 0) @@ -438,9 +435,6 @@ def test_calc_impact_transf_pass(self): self.assertTrue(np.array_equal(imp.event_id, hazard.event_id)) self.assertTrue(np.array_equal(imp.date, hazard.date)) self.assertEqual(imp.event_name, hazard.event_name) - self.assertEqual(imp.tag['exp'].file_name, 
entity.exposures.tag.file_name) - self.assertEqual(imp.tag['haz'].file_name, hazard.tag.file_name) - self.assertEqual(imp.tag['impf_set'].file_name, entity.impact_funcs.tag.file_name) self.assertEqual(risk_transf.aai_agg, 2.3139691495470852e+08) # Execute Tests diff --git a/climada/util/yearsets.py b/climada/util/yearsets.py index d32141dca..0c3b0033f 100755 --- a/climada/util/yearsets.py +++ b/climada/util/yearsets.py @@ -82,7 +82,6 @@ def impact_yearset(imp, sampled_years, lam=None, correction_fac=True, seed=None) #save calculations in yimp yimp.event_id = np.arange(1, n_sampled_years+1) - yimp.tag['yimp object'] = True yimp.date = u_dt.str_to_date([str(date) + '-01-01' for date in sampled_years]) yimp.frequency = np.ones(n_sampled_years)*sum(len(row) for row in sampling_vect )/n_sampled_years @@ -139,7 +138,6 @@ def impact_yearset_from_sampling_vect(imp, sampled_years, sampling_vect, correct yimp.at_event = imp_per_year n_sampled_years = len(sampled_years) yimp.event_id = np.arange(1, n_sampled_years+1) - yimp.tag['yimp object'] = True yimp.date = u_dt.str_to_date([str(date) + '-01-01' for date in sampled_years]) yimp.frequency = np.ones(n_sampled_years)*sum(len(row) for row in sampling_vect )/n_sampled_years From f8298102515f0c683c14ba4d193eb1fabdfafb05 Mon Sep 17 00:00:00 2001 From: Lukas Riedel <34276446+peanutfun@users.noreply.github.com> Date: Tue, 4 Jul 2023 13:58:15 +0200 Subject: [PATCH 3/8] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d7ab9e784..2bb90c2c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,8 +68,8 @@ Removed: ### Removed - `Centroids.set_raster_from_pix_bounds` [#721](https://github.com/CLIMADA-project/climada_python/pull/721) - - `requirements/env_developer.yml` environment specs. Use 'extra' requirements when installing the Python package instead [#712](https://github.com/CLIMADA-project/climada_python/pull/712) +- `Impact.tag` attribute. 
This change is not backwards-compatible with respect to the files written and read by the `Impact` class [#743](https://github.com/CLIMADA-project/climada_python/pull/743) ## v3.3.2 From de5704e07a353d10063eb647d990f1f9d4d990ef Mon Sep 17 00:00:00 2001 From: Lukas Riedel <34276446+peanutfun@users.noreply.github.com> Date: Wed, 5 Jul 2023 11:03:31 +0200 Subject: [PATCH 4/8] Make sure Impact.haz_type is cycled through H5 --- climada/engine/impact.py | 1 + climada/engine/test/test_impact.py | 1 + 2 files changed, 2 insertions(+) diff --git a/climada/engine/impact.py b/climada/engine/impact.py index 39c97767d..845b658e6 100644 --- a/climada/engine/impact.py +++ b/climada/engine/impact.py @@ -1175,6 +1175,7 @@ def from_hdf5(cls, file_path: Union[str, Path]): │ ├─ aai_agg │ ├─ crs │ ├─ frequency_unit + │ ├─ haz_type │ ├─ tot_value │ ├─ unit diff --git a/climada/engine/test/test_impact.py b/climada/engine/test/test_impact.py index 4db50d6c4..0ab8739a1 100644 --- a/climada/engine/test/test_impact.py +++ b/climada/engine/test/test_impact.py @@ -937,6 +937,7 @@ def _compare_file_to_imp(self, filepath, impact, dense_imp_mat): self.assertEqual(file.attrs["unit"], impact.unit) self.assertEqual(file.attrs["aai_agg"], impact.aai_agg) self.assertEqual(file.attrs["frequency_unit"], impact.frequency_unit) + self.assertEqual(file.attrs["haz_type"], impact.haz_type) if dense_imp_mat: npt.assert_array_equal(file["imp_mat"], impact.imp_mat.toarray()) From b896a4fceb5b813a6236ded3188d25a6d80778a4 Mon Sep 17 00:00:00 2001 From: Lukas Riedel <34276446+peanutfun@users.noreply.github.com> Date: Wed, 5 Jul 2023 11:04:03 +0200 Subject: [PATCH 5/8] Remove unused import --- climada/engine/impact.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/climada/engine/impact.py b/climada/engine/impact.py index 845b658e6..726e68897 100644 --- a/climada/engine/impact.py +++ b/climada/engine/impact.py @@ -21,7 +21,7 @@ __all__ = ['ImpactFreqCurve', 'Impact'] -from dataclasses import dataclass, field +from dataclasses import dataclass import logging import copy import csv From 4996c0ec1cf0d62be75c22d2764cc3f3d0c71387 Mon Sep 17 00:00:00 2001 From: Lukas Riedel <34276446+peanutfun@users.noreply.github.com> Date: Wed, 5 Jul 2023 11:05:39 +0200 Subject: [PATCH 6/8] Update climada/engine/impact.py Co-authored-by: Chahan M. Kropf --- climada/engine/impact.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/climada/engine/impact.py b/climada/engine/impact.py index 39c97767d..0621e7fd2 100644 --- a/climada/engine/impact.py +++ b/climada/engine/impact.py @@ -1653,7 +1653,7 @@ def concat(cls, imp_list: Iterable, reset_event_ids: bool = False): - sums up the values of attributes ``eai_exp``, ``aai_exp`` - and takes the following attributes from the first impact object in the passed impact list: ``coord_exp``, ``crs``, ``unit``, ``tot_value``, - ``frequency_unit`` + ``frequency_unit``, ``haz_type`` If event ids are not unique among the passed impact objects an error is raised. 
In this case, the user can set ``reset_event_ids=True`` to create unique event ids From 3ca650cf0d005b25a2dc38941e5586e29970ab8a Mon Sep 17 00:00:00 2001 From: Lukas Riedel <34276446+peanutfun@users.noreply.github.com> Date: Mon, 10 Jul 2023 09:48:27 +0200 Subject: [PATCH 7/8] Remove `impfset` argument from `Impact.from_eih` --- climada/engine/impact.py | 6 ++++-- climada/engine/impact_calc.py | 10 +++++----- climada/engine/test/test_impact.py | 6 ++---- climada/engine/test/test_impact_calc.py | 10 ++++------ 4 files changed, 15 insertions(+), 17 deletions(-) diff --git a/climada/engine/impact.py b/climada/engine/impact.py index 42cf2bc7e..c8ec350f8 100644 --- a/climada/engine/impact.py +++ b/climada/engine/impact.py @@ -207,11 +207,13 @@ def calc(self, exposures, impact_funcs, hazard, save_mat=False, assign_centroids #TODO: new name @classmethod - def from_eih(cls, exposures, impfset, hazard, - at_event, eai_exp, aai_agg, imp_mat=None): + def from_eih(cls, exposures, hazard, at_event, eai_exp, aai_agg, imp_mat=None): """ Set Impact attributes from precalculated impact metrics. + .. versionchanged:: 3.3 + The ``impfset`` argument was removed. + Parameters ---------- exposures : climada.entity.Exposures diff --git a/climada/engine/impact_calc.py b/climada/engine/impact_calc.py index 6040a5731..40bec773d 100644 --- a/climada/engine/impact_calc.py +++ b/climada/engine/impact_calc.py @@ -175,9 +175,8 @@ def _return_impact(self, imp_mat_gen, save_mat): imp_mat = None at_event, eai_exp, aai_agg = self.stitch_risk_metrics(imp_mat_gen) return Impact.from_eih( - self.exposures, self.impfset, self.hazard, - at_event, eai_exp, aai_agg, imp_mat - ) + self.exposures, self.hazard, at_event, eai_exp, aai_agg, imp_mat + ) def _return_empty(self, save_mat): """ @@ -202,8 +201,9 @@ def _return_empty(self, save_mat): ) else: imp_mat = None - return Impact.from_eih(self.exposures, self.impfset, self.hazard, - at_event, eai_exp, aai_agg, imp_mat) + return Impact.from_eih( + self.exposures, self.hazard, at_event, eai_exp, aai_agg, imp_mat + ) def minimal_exp_gdf(self, impf_col, assign_centroids, ignore_cover, ignore_deductible): """Get minimal exposures geodataframe for impact computation diff --git a/climada/engine/test/test_impact.py b/climada/engine/test/test_impact.py index 0ab8739a1..698a8bcd6 100644 --- a/climada/engine/test/test_impact.py +++ b/climada/engine/test/test_impact.py @@ -77,8 +77,7 @@ def test_from_eih_pass(self): fake_eai_exp = np.arange(len(exp.gdf)) fake_at_event = np.arange(HAZ.size) fake_aai_agg = np.sum(fake_eai_exp) - imp = Impact.from_eih(exp, ENT.impact_funcs, HAZ, - fake_at_event, fake_eai_exp, fake_aai_agg) + imp = Impact.from_eih(exp, HAZ, fake_at_event, fake_eai_exp, fake_aai_agg) self.assertEqual(imp.crs, exp.crs) self.assertEqual(imp.aai_agg, fake_aai_agg) self.assertEqual(imp.imp_mat.size, 0) @@ -900,8 +899,7 @@ def test_match_centroids(self): fake_eai_exp = np.arange(len(exp.gdf)) fake_at_event = np.arange(HAZ.size) fake_aai_agg = np.sum(fake_eai_exp) - imp = Impact.from_eih(exp, ENT.impact_funcs, HAZ, - fake_at_event, fake_eai_exp, fake_aai_agg) + imp = Impact.from_eih(exp, HAZ, fake_at_event, fake_eai_exp, fake_aai_agg) imp_centr = imp.match_centroids(HAZ) np.testing.assert_array_equal(imp_centr, exp.gdf.centr_TC) diff --git a/climada/engine/test/test_impact_calc.py b/climada/engine/test/test_impact_calc.py index 65f2925d8..68b61c39a 100644 --- a/climada/engine/test/test_impact_calc.py +++ b/climada/engine/test/test_impact_calc.py @@ -686,7 +686,6 @@ def test_save_mat(self, 
from_eih_mock): self.icalc._return_impact(self.imp_mat_gen, save_mat=True) from_eih_mock.assert_called_once_with( ENT.exposures, - ENT.impact_funcs, HAZ, "at_event", "eai_exp", @@ -707,11 +706,10 @@ def test_skip_mat(self, from_eih_mock): # Need to check every argument individually due to the last one being a matrix call_args = from_eih_mock.call_args.args self.assertEqual(call_args[0], ENT.exposures) - self.assertEqual(call_args[1], ENT.impact_funcs) - self.assertEqual(call_args[2], HAZ) - self.assertEqual(call_args[3], "at_event") - self.assertEqual(call_args[4], "eai_exp") - self.assertEqual(call_args[5], "aai_agg") + self.assertEqual(call_args[1], HAZ) + self.assertEqual(call_args[2], "at_event") + self.assertEqual(call_args[3], "eai_exp") + self.assertEqual(call_args[4], "aai_agg") np.testing.assert_array_equal( from_eih_mock.call_args.args[-1], sparse.csr_matrix((0, 0)).toarray() ) From 7b6f65d191cbdf5845b346ec25e35554d4f4385a Mon Sep 17 00:00:00 2001 From: Lukas Riedel <34276446+peanutfun@users.noreply.github.com> Date: Mon, 10 Jul 2023 09:54:39 +0200 Subject: [PATCH 8/8] Remove mention of tag in impact tutorial --- doc/tutorial/climada_engine_Impact.ipynb | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/tutorial/climada_engine_Impact.ipynb b/doc/tutorial/climada_engine_Impact.ipynb index 1250ce1df..7ed751e41 100644 --- a/doc/tutorial/climada_engine_Impact.ipynb +++ b/doc/tutorial/climada_engine_Impact.ipynb @@ -65,7 +65,6 @@ "source": [ "| Attributes from input | Data Type | Description|\n", "| :- | :- | :- |\n", - "| tag |(dict)| dictionary storing the tags of the inputs (Exposure.tag, ImpactFuncSet.tag Hazard.tag)|\n", "| event_id |list(int)| id (>0) of each hazard event (Hazard.event_id)|\n", "| event_name |(list(str))| name of each event (Hazard.event_name)|\n", "| date |np.array| date of events (Hazard.date)|\n",
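
For reference, a minimal sketch of the interface after this series: an `Impact` carries its hazard type as the plain string attribute `haz_type` and has no `tag` attribute, and the CSV/Excel/HDF5 writers emit a `haz_type` column or attribute in place of the former `tag_*` columns and `tag/` group. The keyword values below are illustrative only (they mirror the `dummy_impact` test fixture updated in this series), and the file name `impact_no_tag.csv` is made up for the example.

    import numpy as np
    from scipy import sparse

    from climada.engine import Impact

    # Construct an Impact from precomputed metrics -- there is no `tag`
    # keyword; the hazard type is passed directly as a string.
    imp = Impact(
        event_id=np.arange(6) + 10,
        event_name=["ev_" + str(i) for i in range(6)],
        date=np.arange(6),
        coord_exp=np.array([[1.0, 2.0], [1.5, 2.5]]),
        crs="EPSG:4326",
        eai_exp=np.array([7.2, 7.2]),
        at_event=np.array([0, 2, 4, 6, 60, 62]),
        frequency=np.array([1 / 6, 1 / 6, 1, 1, 1 / 30, 1 / 30]),
        frequency_unit="1/year",
        aai_agg=14.4,
        unit="USD",
        imp_mat=sparse.csr_matrix(
            np.array([[0, 0], [1, 1], [2, 2], [3, 3], [30, 30], [31, 31]])
        ),
        haz_type="TC",  # written to file as "haz_type" instead of "tag_hazard"
    )

    # Round trip through CSV: the header now begins with "haz_type"
    # rather than the removed tag_hazard/tag_exposure/tag_impact_func columns.
    imp.write_csv("impact_no_tag.csv")
    imp_read = Impact.from_csv("impact_no_tag.csv")
    assert imp_read.haz_type == "TC"

Likewise, `Impact.from_eih` is now called without the removed `impfset` argument, i.e. `Impact.from_eih(exposures, hazard, at_event, eai_exp, aai_agg)`.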