From 4ab247416be234347ce0c6924acc9835a980b82c Mon Sep 17 00:00:00 2001 From: Emanuel Schmid <51439563+emanuel-schmid@users.noreply.github.com> Date: Sun, 20 Oct 2024 19:31:34 +0200 Subject: [PATCH 1/4] Develop black (#963) * format * import Impact from climada.engine.impact in order to avoid circular imports * avoid circular imports * pre-commit run --all-files --- .github/ISSUE_TEMPLATE/bug_report.md | 4 +- .github/pull_request_template.md | 4 +- .github/scripts/make_release.py | 4 +- .github/scripts/prepare_release.py | 55 ++- .github/scripts/setup_devbranch.py | 22 +- MANIFEST.in | 2 +- climada.conf | 2 +- .../data/demo/demo_emdat_impact_data_2020.csv | 2 +- climada/data/system/GDP_TWN_IMF_WEO_data.csv | 2 +- .../system/WEALTH2GDP_factors_CRI_2016.csv | 2 +- .../data/emdat_testdata_BGD_USA_1970-2017.csv | 8 +- .../data/emdat_testdata_fake_2007-2011.csv | 2 +- climada/hazard/test/data/trac_short_test.csv | 2 +- doc/Makefile | 2 +- doc/climada/climada.engine.rst | 1 - doc/climada/climada.entity.disc_rates.rst | 1 - .../climada.entity.exposures.litpop.rst | 1 - doc/climada/climada.entity.exposures.rst | 1 - doc/climada/climada.entity.impact_funcs.rst | 1 - doc/climada/climada.entity.measures.rst | 1 - doc/climada/climada.hazard.centroids.rst | 1 - doc/climada/climada.hazard.rst | 1 - doc/climada/climada.hazard.trop_cyclone.rst | 1 - doc/climada/climada.rst | 1 - doc/climada/climada.util.rst | 1 - doc/conf.py | 135 +++--- doc/guide/Guide_Configuration.ipynb | 27 +- doc/guide/Guide_Exception_Logging.ipynb | 13 +- doc/guide/Guide_Py_Performance.ipynb | 2 + doc/guide/Guide_PythonDos-n-Donts.ipynb | 14 +- doc/guide/Guide_Testing.ipynb | 11 +- ...ontinuous_integration_GitHub_actions.ipynb | 16 +- doc/index.rst | 2 +- doc/misc/AUTHORS.md | 2 +- doc/misc/CHANGELOG.md | 2 +- doc/misc/CONTRIBUTING.md | 2 +- doc/tutorial/0_intro_python.ipynb | 205 +++++---- doc/tutorial/1_main_climada.ipynb | 76 ++-- doc/tutorial/climada_engine_CostBenefit.ipynb | 173 +++++--- doc/tutorial/climada_engine_Forecast.ipynb | 143 +++--- doc/tutorial/climada_engine_Impact.ipynb | 168 ++++--- doc/tutorial/climada_engine_impact_data.ipynb | 112 +++-- doc/tutorial/climada_engine_unsequa.ipynb | 416 +++++++++++------- .../climada_engine_unsequa_helper.ipynb | 282 +++++++----- doc/tutorial/climada_entity_DiscRates.ipynb | 19 +- doc/tutorial/climada_entity_Exposures.ipynb | 124 +++--- ...mada_entity_Exposures_polygons_lines.ipynb | 267 +++++++---- .../climada_entity_ImpactFuncSet.ipynb | 18 +- doc/tutorial/climada_entity_LitPop.ipynb | 139 +++--- doc/tutorial/climada_entity_MeasureSet.ipynb | 67 +-- doc/tutorial/climada_hazard_Hazard.ipynb | 379 +++++++++++----- doc/tutorial/climada_hazard_StormEurope.ipynb | 25 +- doc/tutorial/climada_hazard_TropCyclone.ipynb | 93 ++-- doc/tutorial/climada_util_api_client.ipynb | 83 +++- doc/tutorial/climada_util_earth_engine.ipynb | 161 +++---- doc/tutorial/climada_util_yearsets.ipynb | 28 +- .../applications/eca_san_salvador/README.txt | 2 +- .../San_Salvador_Adaptacion.ipynb | 113 +++-- .../San_Salvador_Adaptation.ipynb | 116 +++-- .../San_Salvador_Parametric.ipynb | 52 ++- .../eca_san_salvador/San_Salvador_Risk.ipynb | 91 ++-- .../eca_san_salvador/functions_ss.py | 249 +++++++---- script/jenkins/set_config.py | 8 +- script/jenkins/test_data_api.py | 67 +-- script/jenkins/test_notebooks.py | 138 +++--- 65 files changed, 2548 insertions(+), 1616 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 27760ea62b..8c086f8b92 100644 --- 
a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -12,7 +12,7 @@ A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior/error: -1. +1. Code example: ```python @@ -29,7 +29,7 @@ If applicable, add screenshots to help explain your problem. **System Information (please complete the following information):** - Operating system and version: [e.g. Ubuntu 22.04, macOS 14.3.1, Windows 10] - - Python version: [e.g. 3.10] + - Python version: [e.g. 3.10] (to obtain this information execute > import sys >print(sys.version)) **Additional context** diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index ee53282998..b1e66a5759 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,6 +1,6 @@ Changes proposed in this PR: -- -- +- +- This PR fixes # diff --git a/.github/scripts/make_release.py b/.github/scripts/make_release.py index 5c6260d4db..cdba6755ca 100644 --- a/.github/scripts/make_release.py +++ b/.github/scripts/make_release.py @@ -13,9 +13,9 @@ def get_version() -> str: """Return the current version number, based on the _version.py file.""" [version_file] = glob.glob("climada*/_version.py") - with open(version_file, 'r', encoding="UTF-8") as vfp: + with open(version_file, "r", encoding="UTF-8") as vfp: content = vfp.read() - regex = r'^__version__\s*=\s*[\'\"](.*)[\'\"]\s*$' + regex = r"^__version__\s*=\s*[\'\"](.*)[\'\"]\s*$" mtch = re.match(regex, content) return mtch.group(1) diff --git a/.github/scripts/prepare_release.py b/.github/scripts/prepare_release.py index bce483b6f8..eb0dd4c2b7 100644 --- a/.github/scripts/prepare_release.py +++ b/.github/scripts/prepare_release.py @@ -5,7 +5,7 @@ - update version numbers in _version.py and setup.py - purge the "Unreleased" section of CHANGELOG.md and rename it to the new version number -- copy the README.md file to doc/misc/README.md, +- copy the README.md file to doc/misc/README.md, but without the badges as they interfere with the sphinx doc builder All changes are immediately commited to the repository. 
@@ -38,28 +38,28 @@ def bump_version_number(version_number: str, level: str) -> str: """Return a copy of `version_number` with one level number incremented.""" major, minor, patch = version_number.split(".") if level == "major": - major = str(int(major)+1) + major = str(int(major) + 1) minor = "0" patch = "0" elif level == "minor": - minor = str(int(minor)+1) + minor = str(int(minor) + 1) patch = "0" elif level == "patch": - patch = str(int(patch)+1) + patch = str(int(patch) + 1) else: raise ValueError(f"level should be 'major', 'minor' or 'patch', not {level}") return ".".join([major, minor, patch]) def update_readme(_nvn): - """align doc/misc/README.md with ./README.md but remove the non-markdown header lines from """ - with open("README.md", 'r', encoding="UTF-8") as rmin: - lines = [line for line in rmin.readlines() if not line.startswith('[![')] + """align doc/misc/README.md with ./README.md but remove the non-markdown header lines from""" + with open("README.md", "r", encoding="UTF-8") as rmin: + lines = [line for line in rmin.readlines() if not line.startswith("[![")] while not lines[0].strip(): lines = lines[1:] - with open("doc/misc/README.md", 'w', encoding="UTF-8") as rmout: + with open("doc/misc/README.md", "w", encoding="UTF-8") as rmout: rmout.writelines(lines) - return GitFile('doc/misc/README.md') + return GitFile("doc/misc/README.md") def update_changelog(nvn): @@ -70,16 +70,16 @@ def update_changelog(nvn): release = [] section_name = None section = [] - with open("CHANGELOG.md", 'r', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "r", encoding="UTF-8") as changelog: for line in changelog.readlines(): - if line.startswith('#'): - if line.startswith('### '): + if line.startswith("#"): + if line.startswith("### "): if section: release.append((section_name, section)) section_name = line[4:].strip() section = [] - #print("tag:", section_name) - elif line.startswith('## '): + # print("tag:", section_name) + elif line.startswith("## "): if section: release.append((section_name, section)) if release: @@ -88,7 +88,7 @@ def update_changelog(nvn): release = [] section_name = None section = [] - #print("release:", release_name) + # print("release:", release_name) else: section.append(line) if section: @@ -96,7 +96,7 @@ def update_changelog(nvn): if release: releases.append((release_name, release)) - with open("CHANGELOG.md", 'w', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "w", encoding="UTF-8") as changelog: changelog.write("# Changelog\n\n") for release_name, release in releases: if release_name: @@ -107,7 +107,11 @@ def update_changelog(nvn): if any(ln.strip() for ln in section): if section_name: changelog.write(f"### {section_name}\n") - lines = [ln.strip() for ln in section if "code freeze date: " not in ln.lower()] + lines = [ + ln.strip() + for ln in section + if "code freeze date: " not in ln.lower() + ] if not section_name and release_name.lower() == nvn: print("setting date") for i, line in enumerate(lines): @@ -116,26 +120,26 @@ def update_changelog(nvn): lines[i] = f"Release date: {today}" changelog.write(re.sub("\n+$", "\n", "\n".join(lines))) changelog.write("\n") - return GitFile('CHANGELOG.md') + return GitFile("CHANGELOG.md") def update_version(nvn): """Update the _version.py file""" [file_with_version] = glob.glob("climada*/_version.py") - regex = r'(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)' + regex = r"(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)" return update_file(file_with_version, regex, nvn) def update_setup(new_version_number): 
"""Update the setup.py file""" file_with_version = "setup.py" - regex = r'(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)' + regex = r"(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)" return update_file(file_with_version, regex, new_version_number) def update_file(file_with_version, regex, new_version_number): """Replace the version number(s) in a file, based on a rgular expression.""" - with open(file_with_version, 'r', encoding="UTF-8") as curf: + with open(file_with_version, "r", encoding="UTF-8") as curf: lines = curf.readlines() successfully_updated = False for i, line in enumerate(lines): @@ -145,14 +149,15 @@ def update_file(file_with_version, regex, new_version_number): successfully_updated = True if not successfully_updated: raise RuntimeError(f"cannot determine version of {file_with_version}") - with open(file_with_version, 'w', encoding="UTF-8") as newf: + with open(file_with_version, "w", encoding="UTF-8") as newf: for line in lines: newf.write(line) return GitFile(file_with_version) -class GitFile(): +class GitFile: """Helper class for `git add`.""" + def __init__(self, path): self.path = path @@ -166,8 +171,9 @@ def gitadd(self): ).stdout.decode("utf8") -class Git(): +class Git: """Helper class for `git commit`.""" + def __init__(self): _gitname = subprocess.run( ["git", "config", "--global", "user.name", "'climada'"], @@ -228,6 +234,7 @@ def prepare_new_release(level): if __name__ == "__main__": from sys import argv + try: LEVEL = argv[1] except IndexError: diff --git a/.github/scripts/setup_devbranch.py b/.github/scripts/setup_devbranch.py index 001390fa0c..36c9e6c78f 100644 --- a/.github/scripts/setup_devbranch.py +++ b/.github/scripts/setup_devbranch.py @@ -33,14 +33,15 @@ def get_last_version() -> str: def update_changelog(): """Insert a vanilla "Unreleased" section on top.""" - with open("CHANGELOG.md", 'r', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "r", encoding="UTF-8") as changelog: lines = changelog.readlines() if "## Unreleased" in lines: return - with open("CHANGELOG.md", 'w', encoding="UTF-8") as changelog: - changelog.write("""# Changelog + with open("CHANGELOG.md", "w", encoding="UTF-8") as changelog: + changelog.write( + """# Changelog ## Unreleased @@ -62,27 +63,28 @@ def update_changelog(): ### Removed -""") +""" + ) changelog.writelines(lines[2:]) def update_version(nvn): """Update the _version.py file""" [file_with_version] = glob.glob("climada*/_version.py") - regex = r'(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)' + regex = r"(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)" return update_file(file_with_version, regex, nvn) def update_setup(new_version_number): """Update the setup.py file""" file_with_version = "setup.py" - regex = r'(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)' + regex = r"(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)" return update_file(file_with_version, regex, new_version_number) def update_file(file_with_version, regex, new_version_number): """Replace the version number(s) in a file, based on a rgular expression.""" - with open(file_with_version, 'r', encoding="UTF-8") as curf: + with open(file_with_version, "r", encoding="UTF-8") as curf: lines = curf.readlines() successfully_updated = False for i, line in enumerate(lines): @@ -92,7 +94,7 @@ def update_file(file_with_version, regex, new_version_number): successfully_updated = True if not successfully_updated: raise RuntimeError(f"cannot determine version of {file_with_version}") - with open(file_with_version, 'w', encoding="UTF-8") as newf: + with open(file_with_version, "w", 
encoding="UTF-8") as newf: for line in lines: newf.write(line) @@ -100,10 +102,10 @@ def update_file(file_with_version, regex, new_version_number): def setup_devbranch(): """Adjust files after a release was published, i.e., apply the canonical deviations from main in develop. - + Just changes files, all `git` commands are in the setup_devbranch.sh file. """ - main_version = get_last_version().strip('v') + main_version = get_last_version().strip("v") semver = main_version.split(".") semver[-1] = f"{int(semver[-1]) + 1}-dev" dev_version = ".".join(semver) diff --git a/MANIFEST.in b/MANIFEST.in index 2c9965a945..fff806f537 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,4 +4,4 @@ graft climada/*/test/data graft climada/test/data graft data global-exclude .* -global-exclude *.py[co] \ No newline at end of file +global-exclude *.py[co] diff --git a/climada.conf b/climada.conf index 3d07e07ca4..367928405c 100644 --- a/climada.conf +++ b/climada.conf @@ -27,4 +27,4 @@ "supported_exposures_types": ["litpop", "crop_production", "base"] }, "log_level": "INFO" -} \ No newline at end of file +} diff --git a/climada/data/demo/demo_emdat_impact_data_2020.csv b/climada/data/demo/demo_emdat_impact_data_2020.csv index 55c72eaf4a..3cf4f5c85b 100644 --- a/climada/data/demo/demo_emdat_impact_data_2020.csv +++ b/climada/data/demo/demo_emdat_impact_data_2020.csv @@ -1073,4 +1073,4 @@ Dis No,Year,Seq,Disaster Group,Disaster Subgroup,Disaster Type,Disaster Subtype, 2020-0132-TON,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Tonga,TON,Polynesia,Oceania,"Tongatapu, 'Eua",,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,,,1289,,1289,,,111000, 2020-0015-TUV,2020,0015,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Tino',Affected,Tuvalu,TUV,Polynesia,Oceania,,,,,,,Yes,,,Kph,,,,,2020,1,18,2020,1,18,,,,,,,,, 2020-0219-USA,2020,0219,Natural,Meteorological,Storm,Tropical cyclone,,Tropical storm 'Cristobal',Affected,United States of America (the),USA,Northern America,Americas,"errebonne, Plaquemines, Lafourche Parishes (Louisiana)",,,,,,Yes,,80,Kph,,,,,2020,6,7,2020,6,7,,,,,,,,, -2020-0132-VUT,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Vanuatu,VUT,Melanesia,Oceania,Pentecost and Espiritu Santo,,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,4,,,,,,,, \ No newline at end of file +2020-0132-VUT,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Vanuatu,VUT,Melanesia,Oceania,Pentecost and Espiritu Santo,,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,4,,,,,,,, diff --git a/climada/data/system/GDP_TWN_IMF_WEO_data.csv b/climada/data/system/GDP_TWN_IMF_WEO_data.csv index e0acd9898c..e39f4cb62c 100644 --- a/climada/data/system/GDP_TWN_IMF_WEO_data.csv +++ b/climada/data/system/GDP_TWN_IMF_WEO_data.csv @@ -3,4 +3,4 @@ TWN,Taiwan Province of China,"Gross domestic product, current prices",U.S. dolla TWN,Taiwan Province of China,"Gross domestic product, deflator",Index,,"See notes for: Gross domestic product, constant prices (National currency) Gross domestic product, current prices (National currency).",69.946,77.417,79.33,81.444,82.495,82.523,86.575,86.605,86.657,88.892,93.472,96.725,99.824,103.299,105.065,107.554,110.062,112.506,116.182,113.911,112.88,112.189,111.733,110.174,109.894,108.209,107.095,106.638,103.869,104.003,102.405,100,100.543,102.019,103.749,107.128,108.085,106.84,105.834,106.337,106.484,107.149,108.054,109.026,109.951,2018 TWN,Taiwan Province of China,"Gross domestic product per capita, current prices",U.S. 
dollars,Units,"See notes for: Gross domestic product, current prices (National currency) Population (Persons).","2,367.600","2,692.406","2,675.823","2,882.402","3,203.468","3,295.112","4,010.111","5,325.216","6,337.499","7,577.046","8,178.152","9,092.297","10,725.702","11,266.123","12,108.752","13,076.007","13,597.248","13,968.097","12,787.258","13,768.274","14,876.879","13,408.383","13,715.525","14,094.370","15,360.724","16,503.313","16,984.540","17,780.925","18,102.946","16,959.775","19,261.667","20,911.643","21,269.614","21,887.992","22,638.917","22,373.564","22,572.702","24,389.677","25,007.747","24,827.898","25,525.806","26,861.070","28,324.425","29,870.221","31,483.799",2018 ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -"International Monetary Fund, World Economic Outlook Database, October 2019",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \ No newline at end of file +"International Monetary Fund, World Economic Outlook Database, October 2019",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, diff --git a/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv b/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv index f63f0453aa..8e8bb97c90 100644 --- a/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv +++ b/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv @@ -169,4 +169,4 @@ Venezuela,VEN,0.29407,0.35328 Vietnam,VNM,1.23241,1.66724 Yemen,YEM,1.18584,1.76063 Zambia,ZMB,0.10663,0.32193 -Zimbabwe,ZWE,0.20161,1.65566 \ No newline at end of file +Zimbabwe,ZWE,0.20161,1.65566 diff --git a/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv b/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv index 5ca0ec256d..00748e54a0 100644 --- a/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv +++ b/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv @@ -691,7 +691,7 @@ Start date,End date,Country,ISO,Location,Latitude,Longitude,Magnitude value,Magn 02.05.02,08.05.02,United States of America (the),USA,"Pike district (Kentucky province), Virginia province",,,,Km2,Flood,Riverine flood,--,--,9,1000,13000,0,,2002-0266 05.05.02,05.05.02,United States of America (the),USA,"Happy town (Randall, Swisher districts, Texas province)",,,,Kph,Storm,Convective storm,--,--,2,183,0,0,,2002-0283 21.04.02,21.04.02,United States of America (the),USA,"Wayne, Jefferson districts (Illinois province)",,,,Kph,Storm,Convective storm,--,--,1,12,4000,0,,2002-0287 -27.04.02,03.05.02,United States of America (the),USA,"Breckinridge, Meade, Crittenden, Webster, Hopkins, Ohio, Hardin, Edmonson districts (Kentucky province), Bollinger, Howell districts (Missouri province), Charles, Calvert, Dorchester, Wicomico, Cecil districts (Maryland province), +27.04.02,03.05.02,United States of America (the),USA,"Breckinridge, Meade, Crittenden, Webster, Hopkins, Ohio, Hardin, Edmonson districts (Kentucky province), Bollinger, Howell districts (Missouri province), Charles, Calvert, Dorchester, Wicomico, Cecil districts (Maryland province), Illinois (Clay,Union, Johnson,Pope, Moultrie, Saline, Bond), Gordon district (Georgia province), Atchison district (Kansas province), Erie, Allegany districts (New York province), Stark district (Ohio province), Indiana, Mercer, Venango, Butler, Armstrong, Columbia, Lebanon, Allegheny districts (Pennsylvania province), Rutherford, Lake, Henry, Carter districts (Tennessee province), Virginia (Shenandoah, Greensville, Bedford, (Campbell, Nottoway, Prince George), Marshall district (West Virginia province), Pontotoc, Chickasaw districts (Mississippi 
province), Perry district (Indiana province)",,,290,Kph,Storm,Convective storm,--,--,10,100,2200000,2000500,,2002-0310 /04/2002,/04/2002,United States of America (the),USA,Arizona province,,,145,Km2,Wildfire,"Land fire (Brush, Bush, Pasture)",--,--,0,0,0,0,,2002-0312 @@ -858,7 +858,7 @@ Virginia (Shenandoah, Greensville, Bedford, (Campbell, Nottoway, Prince George), 26.06.07,06.07.07,United States of America (the),USA,"Wichita Falls area (Wichita district, Texas province), Georgetown areas (Williamson district, Texas province), Burnet, Marble Falls, Granite Shoals areas (Burnet district, Texas province), Granbury area (Hood district, Texas province), Lampasas, Parker, Eastland districts (Texas province), Miami, Commerce areas (Ottawa district, Oklahoma province), Shawnee, Tecumseh, Maud areas (Pottawatomie district, Oklahoma province), Oklahoma city (Oklahoma district, Oklahoma province), Waurika area (Jefferson district, Oklahoma province), Bartlesville, Dewey areas (Washington district, Oklahoma province), Love, Lincoln districts (Oklahoma province), Coffeyville area (Montgomery district, Kansas province), Osawatomie area (Miami district, Kansas province), Allen, Labette, Neosho, Wilson, Woodson districts (Kansas province), Rockville, Papinville areas (Bates district, Missouri province), Vernon district (Missouri province)",32.84,-97.17,507800,Km2,Flood,Riverine flood,--,Rain,8,5000,0,0,,2007-0244 19.06.07,20.06.07,United States of America (the),USA,New York province,42.23,-74.95,6500,Km2,Flood,Flash flood,Rain,--,4,120,0,0,,2007-0251 17.06.07,22.06.07,United States of America (the),USA,"North Texas, Oklahoma provinces",33.45,-97.3,34750,Km2,Flood,Riverine flood,--,--,10,750,28000,0,,2007-0254 -21.07.07,03.08.07,Bangladesh,BGD,"Goalanda village (Goalandaghat area, Rajbari district, Dhaka province), Aricha port (Shibalaya area, Manikganj district, Dhaka province), Bhagyakul village (Sreenagar area, Munshiganj district, Dhaka province), +21.07.07,03.08.07,Bangladesh,BGD,"Goalanda village (Goalandaghat area, Rajbari district, Dhaka province), Aricha port (Shibalaya area, Manikganj district, Dhaka province), Bhagyakul village (Sreenagar area, Munshiganj district, Dhaka province), Bandarban, Feni, Comilla districts (Chittagong province), Sirajganj district (Rajshahi province), Rangpur province",23.92,91.23,7000,Km2,Flood,Riverine flood,"Slide (land, mud, snow, rock)",--,1110,13771380,100000,0,,2007-0311 24.06.07,02.07.07,United States of America (the),USA,"Alpine, Amador, Calaveras, El Dorado, Mono, Placer, Tuolumne districts (California province)",,,,Km2,Wildfire,Forest fire,--,--,0,768,0,150000,,2007-0351 @@ -980,7 +980,7 @@ Bandarban, Feni, Comilla districts (Chittagong province), Sirajganj district (Ra 22.01.12,23.01.12,United States of America (the),USA,"Jefferson, Chilton districts (Alabama province)",,,240,Kph,Storm,Convective storm,--,--,2,100,175000,200000,,2012-0010 28.02.12,29.02.12,United States of America (the),USA,"Nebraska, Kansas, Missouri, Illinois, Indiana, Kentucky provinces",,,270,Kph,Storm,Convective storm,--,--,14,200,500000,450000,,2012-0055 02.03.12,04.03.12,United States of America (the),USA,"Alabama, Tennessee, Illinois, Kentucky, Indiana, Ohio, Georgia, Florida, Mississippi, North Carolina, Virginia provinces",,,112,Kph,Storm,Convective storm,Flood,Hail,41,0,5000000,2500000,,2012-0060 -06.04.12,06.04.12,Bangladesh,BGD,"Panchagarh, Rangpur, Nilphamari districts (Rangpur province), Noakhali, Comilla districts (Chittagong province), Narsingdi, Jamalpur, Faridpur, 
Shariatpur districts (Dhaka province), Jessore, Satkhira, Khulna, Chuadanga districts (Khulna province), Rajshahi district (Rajshahi province), Sylhet district (Sylhet province), +06.04.12,06.04.12,Bangladesh,BGD,"Panchagarh, Rangpur, Nilphamari districts (Rangpur province), Noakhali, Comilla districts (Chittagong province), Narsingdi, Jamalpur, Faridpur, Shariatpur districts (Dhaka province), Jessore, Satkhira, Khulna, Chuadanga districts (Khulna province), Rajshahi district (Rajshahi province), Sylhet district (Sylhet province), Bhola district (Barisal province)",,,56,Kph,Storm,Convective storm,Hail,--,25,55121,0,0,,2012-0082 02.04.12,03.04.12,United States of America (the),USA,"Dallas, Tarrant districts (Texas province)",,,,Kph,Storm,Convective storm,--,--,0,3300,1550000,800000,,2012-0122 14.04.12,15.04.12,United States of America (the),USA,"Oklahoma, Kansas, Iowa, Nebraska, South Dakota, Minnesota provinces",,,,Kph,Storm,Convective storm,--,--,6,297,1800000,910000,,2012-0156 @@ -1165,4 +1165,4 @@ Wilkes, Ashe )",,,140,Kph,Storm,Tropical cyclone,--,--,0,60,250000,0,Tropical de 03.11.17,12.12.17,Bangladesh,BGD,Cox�s Bazar ,,,,Vaccinated,Epidemic,Bacterial disease,--,--,15,789,0,0,Diphteria,2017-0556 06.03.17,09.03.17,United States of America (the),USA,"Missouri (Oak Grove in Jackson County, Clay and Clinton (Trimble, Plattsburg, Lathrop) counties), Iowa (Centerville in Appanoose county, Muscatine), Minnesota (Sherburne, Freeborn counties, Lake Ann in Carver county), Kansas (Wabaunsee, Pottawatomie and Butler counties), Wisconsin, Arkansas, Oklahoma, Illinois, Mississipi, Michigan, New-York, Pennsylvania, Massachussets, Ohio, Nebraska, Indiana",,,130,Kph,Storm,Convective storm,Hail,--,2,615,2200000,2000000,,2017-0563 25.03.17,28.03.17,United States of America (the),USA,"Texas (Justin in Denton, Collin, Rockwall, Lubbock counties, Seymour in Baylor, Dallas � Fort Worth metro area, Houston metro area), Oklahoma (El Reno in Canadian, Oklahoma city metro region, Caddo in Bryan, Cleveland South and East), Kansas (south), Kentucky, Tennessee, Mississippi, Alabama, Georgia, Indianapolis (Marion-IN)",,,175,Kph,Storm,Convective storm,Hail,Flood,1,0,2700000,2000000,,2017-0564 -/03/2017,/09/2017,United States of America (the),USA,"Upper Midwest, Northern Rockies and parts of the West",,,,Km2,Drought,Drought,--,--,0,0,2500000,1900000,,2017-9550 \ No newline at end of file +/03/2017,/09/2017,United States of America (the),USA,"Upper Midwest, Northern Rockies and parts of the West",,,,Km2,Drought,Drought,--,--,0,0,2500000,1900000,,2017-9550 diff --git a/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv b/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv index 6826050a44..3d6242746c 100644 --- a/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv +++ b/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv @@ -4,4 +4,4 @@ Start date,End date,Country,ISO,Location,Latitude,Longitude,Magnitude value,Magn 15.01.09,26.01.09,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood3,2009-0001 15.01.10,27.01.10,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood4,2010-0001 15.01.11,28.01.11,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood5,2011-0001 -15.01.11,28.01.11,Germany,DEU,Konstanz,22,22,2,Km2,Flood,Riverine flood,--,--,0,0,1000,0,FakeFlood5,2011-0001 \ No newline at end of file +15.01.11,28.01.11,Germany,DEU,Konstanz,22,22,2,Km2,Flood,Riverine 
flood,--,--,0,0,1000,0,FakeFlood5,2011-0001 diff --git a/climada/hazard/test/data/trac_short_test.csv b/climada/hazard/test/data/trac_short_test.csv index 79defb690d..bacbd8c993 100644 --- a/climada/hazard/test/data/trac_short_test.csv +++ b/climada/hazard/test/data/trac_short_test.csv @@ -7,4 +7,4 @@ cgps_lat,cgps_lon,data_provider,gen_basin,ibtracsID,isotime,model,msize,ngps_lat 12.3,-31,hurdat_atl,NA,1951239N12334,1951082812,H08,101,12.3,-32.3,1,-999,1010,-999,0.1,0,6,25 12.3,-32.3,hurdat_atl,NA,1951239N12334,1951082818,H08,101,12.3,-33.6,1,-999,1010,-999,0.1,0,6,25 12.3,-33.6,hurdat_atl,NA,1951239N12334,1951082900,H08,101,12.3,-34.9,1,-999,1010,-999,0.1,0,6,25 -12.3,-34.9,hurdat_atl,NA,1951239N12334,1951082906,H08,101,12.3,-36.3,1,-999,1010,-999,0.1,0,6,25 \ No newline at end of file +12.3,-34.9,hurdat_atl,NA,1951239N12334,1951082906,H08,101,12.3,-36.3,1,-999,1010,-999,0.1,0,6,25 diff --git a/doc/Makefile b/doc/Makefile index 0a8a51eba2..41c2d07bf0 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -9,7 +9,7 @@ PAPER = # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) ./ +ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) ./ .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest diff --git a/doc/climada/climada.engine.rst b/doc/climada/climada.engine.rst index 91274418fa..f21024fdeb 100644 --- a/doc/climada/climada.engine.rst +++ b/doc/climada/climada.engine.rst @@ -52,4 +52,3 @@ climada\.engine\.impact\_data module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.disc_rates.rst b/doc/climada/climada.entity.disc_rates.rst index bc17051c65..4089561f02 100644 --- a/doc/climada/climada.entity.disc_rates.rst +++ b/doc/climada/climada.entity.disc_rates.rst @@ -8,4 +8,3 @@ climada\.entity\.disc\_rates\.base module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.exposures.litpop.rst b/doc/climada/climada.entity.exposures.litpop.rst index 9e65391b0b..62e233a063 100644 --- a/doc/climada/climada.entity.exposures.litpop.rst +++ b/doc/climada/climada.entity.exposures.litpop.rst @@ -24,4 +24,3 @@ climada\.entity\.exposures\.litpop\.nightlight module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.exposures.rst b/doc/climada/climada.entity.exposures.rst index 30f175d10c..952af75e85 100644 --- a/doc/climada/climada.entity.exposures.rst +++ b/doc/climada/climada.entity.exposures.rst @@ -12,4 +12,3 @@ climada\.entity\.exposures\.base module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.impact_funcs.rst b/doc/climada/climada.entity.impact_funcs.rst index 91f88ff77f..90ad9441b1 100644 --- a/doc/climada/climada.entity.impact_funcs.rst +++ b/doc/climada/climada.entity.impact_funcs.rst @@ -32,4 +32,3 @@ climada\.entity\.impact\_funcs\.trop\_cyclone module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.measures.rst b/doc/climada/climada.entity.measures.rst index a7d16c650a..8e63a2082b 100644 --- a/doc/climada/climada.entity.measures.rst +++ b/doc/climada/climada.entity.measures.rst @@ -16,4 +16,3 @@ climada\.entity\.measures\.measure\_set module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.centroids.rst b/doc/climada/climada.hazard.centroids.rst index 8038d406ef..7a9c65a908 100644 --- 
a/doc/climada/climada.hazard.centroids.rst +++ b/doc/climada/climada.hazard.centroids.rst @@ -8,4 +8,3 @@ climada\.hazard\.centroids\.centr module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.rst b/doc/climada/climada.hazard.rst index 8e4767ae62..3b3bef00b4 100644 --- a/doc/climada/climada.hazard.rst +++ b/doc/climada/climada.hazard.rst @@ -69,4 +69,3 @@ climada\.hazard\.tc\_tracks\_synth module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.trop_cyclone.rst b/doc/climada/climada.hazard.trop_cyclone.rst index c703126ec1..caafdcd93a 100644 --- a/doc/climada/climada.hazard.trop_cyclone.rst +++ b/doc/climada/climada.hazard.trop_cyclone.rst @@ -16,4 +16,3 @@ climada\.hazard\.trop\_cyclone\.trop\_cyclone\_windfields module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.rst b/doc/climada/climada.rst index e248812bca..557532912f 100644 --- a/doc/climada/climada.rst +++ b/doc/climada/climada.rst @@ -8,4 +8,3 @@ Software documentation per package climada.entity climada.hazard climada.util - diff --git a/doc/climada/climada.util.rst b/doc/climada/climada.util.rst index 820fd43f7f..98df93aec1 100644 --- a/doc/climada/climada.util.rst +++ b/doc/climada/climada.util.rst @@ -152,4 +152,3 @@ climada\.util\.yearsets module :members: :undoc-members: :show-inheritance: - diff --git a/doc/conf.py b/doc/conf.py index 02e19ecc07..b4ef1dc69d 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -18,49 +18,52 @@ # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. # sys.path.append(os.path.abspath('sphinxext')) -sys.path.insert(0, os.path.abspath('../')) +sys.path.insert(0, os.path.abspath("../")) # set version from climada import _version + __version__ = _version.__version__ # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['matplotlib.sphinxext.plot_directive', - 'IPython.sphinxext.ipython_directive', - 'IPython.sphinxext.ipython_console_highlighting', - 'sphinx.ext.mathjax', - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.inheritance_diagram', - 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon', - 'sphinx.ext.ifconfig', - 'myst_nb', - 'sphinx_markdown_tables', - 'readthedocs_ext.readthedocs',] +extensions = [ + "matplotlib.sphinxext.plot_directive", + "IPython.sphinxext.ipython_directive", + "IPython.sphinxext.ipython_console_highlighting", + "sphinx.ext.mathjax", + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.inheritance_diagram", + "sphinx.ext.viewcode", + "sphinx.ext.napoleon", + "sphinx.ext.ifconfig", + "myst_nb", + "sphinx_markdown_tables", + "readthedocs_ext.readthedocs", +] # read the docs version used for links -if 'dev' in __version__: - read_docs_url = 'en/latest/' +if "dev" in __version__: + read_docs_url = "en/latest/" else: - read_docs_url = 'en/v{}/'.format(__version__) + read_docs_url = "en/v{}/".format(__version__) # Add any paths that contain templates here, relative to this directory. templates_path = [] # The encoding of source files. -#source_encoding = 'utf-8' +# source_encoding = 'utf-8' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. 
-project = 'CLIMADA' -copyright = '2017, ETH Zurich' -author = 'CLIMADA contributors' +project = "CLIMADA" +copyright = "2017, ETH Zurich" +author = "CLIMADA contributors" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -73,45 +76,45 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -language = 'en' +language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. -#unused_docs = [] +# unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. # exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'test', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "test", "Thumbs.db", ".DS_Store"] # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -125,17 +128,17 @@ # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. @@ -149,45 +152,45 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
-#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_use_modindex = True +# html_use_modindex = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'climadadoc' +htmlhelp_basename = "climadadoc" # -- Options for LaTeX output -------------------------------------------------- @@ -195,47 +198,55 @@ latex_engine = "xelatex" # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - (master_doc, 'climada.tex', u'CLIMADA documentation', - u'CLIMADA contributors', 'manual'), + ( + master_doc, + "climada.tex", + "CLIMADA documentation", + "CLIMADA contributors", + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. 
-#latex_use_modindex = True +# latex_use_modindex = True + # ----------------------------------------------------------------------------- # show __init__ documentation def skip(app, what, name, obj, skip, options): - if (name == "__init__"): + if name == "__init__": return False return skip + # remove docstrings modules def remove_module_docstring(app, what, name, obj, options, lines): if what == "module": del lines[:] + autodoc_member_order = "bysource" # --- MYST Parser settings ---- @@ -260,13 +271,15 @@ def remove_module_docstring(app, what, name, obj, options, lines): # --- + def setup(app): app.connect("autodoc-skip-member", skip) app.connect("autodoc-process-docstring", remove_module_docstring) # Pass to the app if we are building this on ReadTheDocs - on_rtd = True if (os.environ.get('READTHEDOCS') == 'True') else False - app.add_config_value('readthedocs', on_rtd, 'env') + on_rtd = True if (os.environ.get("READTHEDOCS") == "True") else False + app.add_config_value("readthedocs", on_rtd, "env") + # improve parameters description napoleon_use_param = False diff --git a/doc/guide/Guide_Configuration.ipynb b/doc/guide/Guide_Configuration.ipynb index 50ffc35f2f..69056eba61 100644 --- a/doc/guide/Guide_Configuration.ipynb +++ b/doc/guide/Guide_Configuration.ipynb @@ -54,9 +54,9 @@ ], "source": [ "# suboptimal\n", - "my_dict = {'x': 4}\n", - "if my_dict['x'] > 3:\n", - " msg = 'well, arh, ...'\n", + "my_dict = {\"x\": 4}\n", + "if my_dict[\"x\"] > 3:\n", + " msg = \"well, arh, ...\"\n", "msg" ] }, @@ -78,10 +78,10 @@ ], "source": [ "# good\n", - "X = 'x'\n", + "X = \"x\"\n", "my_dict = {X: 4}\n", "if my_dict[X] > 3:\n", - " msg = 'yeah!'\n", + " msg = \"yeah!\"\n", "msg" ] }, @@ -103,7 +103,7 @@ ], "source": [ "# possibly overdoing it\n", - "X = 'x'\n", + "X = \"x\"\n", "Y = \"this doesn't mean that every string must be a constant\"\n", "my_dict = {X: 4}\n", "if my_dict[X] > 3:\n", @@ -139,13 +139,16 @@ ], "source": [ "import pandas as pd\n", - "X = 'x'\n", - "df = pd.DataFrame({'x':[1,2,3], 'y':[4,5,6]})\n", + "\n", + "X = \"x\"\n", + "df = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\n", "try:\n", " df.X\n", "except:\n", - " from sys import stderr; stderr.write(\"this does not work\\n\")\n", - "df[X] # this does work but it's less pretty\n", + " from sys import stderr\n", + "\n", + " stderr.write(\"this does not work\\n\")\n", + "df[X] # this does work but it's less pretty\n", "df.x" ] }, @@ -357,7 +360,9 @@ "try:\n", " CONFIG.hazard.trop_cyclone.random_seed.str()\n", "except Exception as e:\n", - " from sys import stderr; stderr.write(f\"cannot convert random_seed to str: {e}\\n\")" + " from sys import stderr\n", + "\n", + " stderr.write(f\"cannot convert random_seed to str: {e}\\n\")" ] }, { diff --git a/doc/guide/Guide_Exception_Logging.ipynb b/doc/guide/Guide_Exception_Logging.ipynb index 55341f434b..b4f776aa98 100644 --- a/doc/guide/Guide_Exception_Logging.ipynb +++ b/doc/guide/Guide_Exception_Logging.ipynb @@ -44,7 +44,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Bad (1)\n", + "# Bad (1)\n", "x = 1\n", "try:\n", " l = len(events)\n", @@ -60,7 +60,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Still bad (2)\n", + "# Still bad (2)\n", "try:\n", " l = len(events)\n", " if l < 1:\n", @@ -75,7 +75,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Better, but still unsufficient (3)\n", + "# Better, but still unsufficient (3)\n", "try:\n", " l = len(events)\n", " if l < 1:\n", @@ -90,7 +90,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Even better (4)\n", + "# 
Even better (4)\n", "try:\n", " l = len(events)\n", "except TypeError:\n", @@ -105,13 +105,13 @@ "metadata": {}, "outputs": [], "source": [ - "#Even better (5)\n", + "# Even better (5)\n", "try:\n", " l = len(events)\n", "except TypeError as tper:\n", " raise TypeError(\"The provided variable events is not a list\") from tper\n", "if l < 1:\n", - " raise ValueError(\"To compute an impact there must be at least one event.\")\n" + " raise ValueError(\"To compute an impact there must be at least one event.\")" ] }, { @@ -172,6 +172,7 @@ "source": [ "import logging\n", "from climada.util.config import LOGGER\n", + "\n", "LOGGER.setLevel(logging.ERROR)" ] }, diff --git a/doc/guide/Guide_Py_Performance.ipynb b/doc/guide/Guide_Py_Performance.ipynb index bb3cf209f3..21f81313dd 100644 --- a/doc/guide/Guide_Py_Performance.ipynb +++ b/doc/guide/Guide_Py_Performance.ipynb @@ -188,6 +188,7 @@ ], "source": [ "import numpy as np\n", + "\n", "%timeit np.sum(list_of_numbers)" ] }, @@ -947,6 +948,7 @@ "source": [ "from numba import njit\n", "\n", + "\n", "@njit\n", "def sum_array(arr):\n", " result = 0.0\n", diff --git a/doc/guide/Guide_PythonDos-n-Donts.ipynb b/doc/guide/Guide_PythonDos-n-Donts.ipynb index 85295356aa..222ffd0ab3 100644 --- a/doc/guide/Guide_PythonDos-n-Donts.ipynb +++ b/doc/guide/Guide_PythonDos-n-Donts.ipynb @@ -147,14 +147,12 @@ "outputs": [], "source": [ "# Vertically aligned with opening delimiter.\n", - "foo = long_function_name(var_one, var_two,\n", - " var_three, var_four)\n", + "foo = long_function_name(var_one, var_two, var_three, var_four)\n", + "\n", "\n", "# Hanging indentation (4 additonal spaces)\n", - "def very_very_long_function_name(\n", - " var_one, var_two, var_three,\n", - " var_four):\n", - " print(var_one)\n" + "def very_very_long_function_name(var_one, var_two, var_three, var_four):\n", + " print(var_one)" ] }, { @@ -303,6 +301,8 @@ " return math.sqrt(x)\n", " else:\n", " return None\n", + "\n", + "\n", "# Wrong\n", "def foo(x):\n", " if x >= 0:\n", @@ -601,7 +601,7 @@ "source": [ "@uppercase_decorator\n", "def say_hi():\n", - " return 'hello there'" + " return \"hello there\"" ] }, { diff --git a/doc/guide/Guide_Testing.ipynb b/doc/guide/Guide_Testing.ipynb index f1876080ce..319d8ada55 100644 --- a/doc/guide/Guide_Testing.ipynb +++ b/doc/guide/Guide_Testing.ipynb @@ -209,7 +209,9 @@ "source": [ "from climada.test import get_test_file\n", "\n", - "my_test_file = get_test_file(ds_name='my-test-file', file_format='hdf5') # returns a pathlib.Path object" + "my_test_file = get_test_file(\n", + " ds_name=\"my-test-file\", file_format=\"hdf5\"\n", + ") # returns a pathlib.Path object" ] }, { @@ -240,11 +242,16 @@ "outputs": [], "source": [ "import climada\n", + "\n", + "\n", "def x(download_file=climada.util.files_handler.download_file):\n", - " filepath = download_file('http://real_data.ch')\n", + " filepath = download_file(\"http://real_data.ch\")\n", " return Path(filepath).stat().st_size\n", "\n", + "\n", "import unittest\n", + "\n", + "\n", "class TestX(unittest.TestCase):\n", " def download_file_dummy(url):\n", " return \"phony_data.ch\"\n", diff --git a/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb b/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb index d9b1d9053a..f800f8eda3 100644 --- a/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb +++ b/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb @@ -95,20 +95,23 @@ } ], "source": [ - "def x(b:bool):\n", + "def x(b: bool):\n", " if b:\n", - " print('been here')\n", + " 
print(\"been here\")\n", " return 4\n", " else:\n", - " print('been there')\n", + " print(\"been there\")\n", " return 0\n", "\n", - "def y(b:bool):\n", - " print('been everywhere')\n", - " return 1/x(b)\n", + "\n", + "def y(b: bool):\n", + " print(\"been everywhere\")\n", + " return 1 / x(b)\n", "\n", "\n", "import unittest\n", + "\n", + "\n", "class TestXY(unittest.TestCase):\n", " def test_x(self):\n", " self.assertEqual(x(True), 4)\n", @@ -117,6 +120,7 @@ " def test_y(self):\n", " self.assertEqual(y(True), 0.25)\n", "\n", + "\n", "unittest.TextTestRunner().run(unittest.TestLoader().loadTestsFromTestCase(TestXY));" ] }, diff --git a/doc/index.rst b/doc/index.rst index 732290eeee..4ad14dd788 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -69,7 +69,7 @@ Jump right in: .. toctree:: :caption: API Reference :hidden: - + Python Modules diff --git a/doc/misc/AUTHORS.md b/doc/misc/AUTHORS.md index 2d2e8405f4..561ed5cd36 120000 --- a/doc/misc/AUTHORS.md +++ b/doc/misc/AUTHORS.md @@ -1 +1 @@ -../../AUTHORS.md \ No newline at end of file +../../AUTHORS.md diff --git a/doc/misc/CHANGELOG.md b/doc/misc/CHANGELOG.md index 699cc9e7b7..03cb731062 120000 --- a/doc/misc/CHANGELOG.md +++ b/doc/misc/CHANGELOG.md @@ -1 +1 @@ -../../CHANGELOG.md \ No newline at end of file +../../CHANGELOG.md diff --git a/doc/misc/CONTRIBUTING.md b/doc/misc/CONTRIBUTING.md index f939e75f21..bcac999a8e 120000 --- a/doc/misc/CONTRIBUTING.md +++ b/doc/misc/CONTRIBUTING.md @@ -1 +1 @@ -../../CONTRIBUTING.md \ No newline at end of file +../../CONTRIBUTING.md diff --git a/doc/tutorial/0_intro_python.ipynb b/doc/tutorial/0_intro_python.ipynb index 43df82d5bc..8318986028 100644 --- a/doc/tutorial/0_intro_python.ipynb +++ b/doc/tutorial/0_intro_python.ipynb @@ -27,15 +27,15 @@ "metadata": {}, "outputs": [], "source": [ - "print('Addition: 2 + 2 =', 2 + 2)\n", - "print('Substraction: 50 - 5*6 =', 50 - 5*6)\n", - "print('Use of parenthesis: (50 - 5*6) / 4 =', (50 - 5*6) / 4)\n", - "print('Classic division returns a float: 17 / 3 =', 17 / 3)\n", - "print('Floor division discards the fractional part: 17 // 3 =', 17 // 3)\n", - "print('The % operator returns the remainder of the division: 17 % 3 =', 17 % 3)\n", - "print('Result * divisor + remainder: 5 * 3 + 2 =', 5 * 3 + 2)\n", - "print('5 squared: 5 ** 2 =', 5 ** 2)\n", - "print('2 to the power of 7: 2 ** 7 =', 2 ** 7)" + "print(\"Addition: 2 + 2 =\", 2 + 2)\n", + "print(\"Substraction: 50 - 5*6 =\", 50 - 5 * 6)\n", + "print(\"Use of parenthesis: (50 - 5*6) / 4 =\", (50 - 5 * 6) / 4)\n", + "print(\"Classic division returns a float: 17 / 3 =\", 17 / 3)\n", + "print(\"Floor division discards the fractional part: 17 // 3 =\", 17 // 3)\n", + "print(\"The % operator returns the remainder of the division: 17 % 3 =\", 17 % 3)\n", + "print(\"Result * divisor + remainder: 5 * 3 + 2 =\", 5 * 3 + 2)\n", + "print(\"5 squared: 5 ** 2 =\", 5**2)\n", + "print(\"2 to the power of 7: 2 ** 7 =\", 2**7)" ] }, { @@ -72,11 +72,11 @@ "metadata": {}, "outputs": [], "source": [ - "print('spam eggs') # single quotes\n", - "print('doesn\\'t') # use \\' to escape the single quote...\n", - "print(\"doesn't\") # ...or use double quotes instead\n", + "print(\"spam eggs\") # single quotes\n", + "print(\"doesn't\") # use \\' to escape the single quote...\n", + "print(\"doesn't\") # ...or use double quotes instead\n", + "print('\"Yes,\" he said.')\n", "print('\"Yes,\" he said.')\n", - "print(\"\\\"Yes,\\\" he said.\")\n", "print('\"Isn\\'t,\" she said.')" ] }, @@ -96,13 +96,13 @@ "metadata": {}, "outputs": [], 
"source": [ - "word = 'Python'\n", - "print('word = ', word)\n", - "print('Character in position 0: word[0] =', word[0])\n", - "print('Character in position 5: word[5] =', word[5])\n", - "print('Last character: word[-1] =', word[-1])\n", - "print('Second-last character: word[-2] =', word[-2])\n", - "print('word[-6] =', word[-6])" + "word = \"Python\"\n", + "print(\"word = \", word)\n", + "print(\"Character in position 0: word[0] =\", word[0])\n", + "print(\"Character in position 5: word[5] =\", word[5])\n", + "print(\"Last character: word[-1] =\", word[-1])\n", + "print(\"Second-last character: word[-2] =\", word[-2])\n", + "print(\"word[-6] =\", word[-6])" ] }, { @@ -118,8 +118,8 @@ "metadata": {}, "outputs": [], "source": [ - "print('Characters from position 0 (included) to 2 (excluded): word[0:2] =', word[0:2])\n", - "print('Characters from position 2 (included) to 5 (excluded): word[2:5] =', word[2:5])" + "print(\"Characters from position 0 (included) to 2 (excluded): word[0:2] =\", word[0:2])\n", + "print(\"Characters from position 2 (included) to 5 (excluded): word[2:5] =\", word[2:5])" ] }, { @@ -145,11 +145,11 @@ "outputs": [], "source": [ "squares = [1, 4, 9, 16, 25]\n", - "print('squares: ', squares)\n", - "print('Indexing returns the item: squares[0]:', squares[0])\n", - "print('squares[-1]:', squares[-1])\n", - "print('Slicing returns a new list: squares[-3:]:', squares[-3:])\n", - "print('squares[:]:', squares[:])" + "print(\"squares: \", squares)\n", + "print(\"Indexing returns the item: squares[0]:\", squares[0])\n", + "print(\"squares[-1]:\", squares[-1])\n", + "print(\"Slicing returns a new list: squares[-3:]:\", squares[-3:])\n", + "print(\"squares[:]:\", squares[:])" ] }, { @@ -184,7 +184,7 @@ "cubes = [1, 8, 27, 65, 125] # something's wrong here\n", "cubes[3] = 64 # replace the wrong value\n", "cubes.append(216) # add the cube of 6\n", - "cubes.append(7 ** 3) # and the cube of 7\n", + "cubes.append(7**3) # and the cube of 7\n", "cubes" ] }, @@ -197,8 +197,8 @@ "# Note: execution of this cell will fail\n", "\n", "# Try to modify a character of a string\n", - "word = 'Python'\n", - "word[0] = 'p'" + "word = \"Python\"\n", + "word[0] = \"p\"" ] }, { @@ -262,7 +262,7 @@ "metadata": {}, "outputs": [], "source": [ - "t = 12345, 54321, 'hello!'\n", + "t = 12345, 54321, \"hello!\"\n", "t[0]" ] }, @@ -322,8 +322,8 @@ "metadata": {}, "outputs": [], "source": [ - "t = 12345, 54321, 'hello!' 
# tuple packing\n", - "x, y, z = t # tuple unpacking\n", + "t = 12345, 54321, \"hello!\" # tuple packing\n", + "x, y, z = t # tuple unpacking\n", "x, y, z" ] }, @@ -344,8 +344,8 @@ "metadata": {}, "outputs": [], "source": [ - "basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}\n", - "basket # show that duplicates have been removed" + "basket = {\"apple\", \"orange\", \"apple\", \"pear\", \"orange\", \"banana\"}\n", + "basket # show that duplicates have been removed" ] }, { @@ -354,7 +354,7 @@ "metadata": {}, "outputs": [], "source": [ - "'orange' in basket # fast membership testing" + "\"orange\" in basket # fast membership testing" ] }, { @@ -363,7 +363,7 @@ "metadata": {}, "outputs": [], "source": [ - "'crabgrass' in basket" + "\"crabgrass\" in basket" ] }, { @@ -373,9 +373,9 @@ "outputs": [], "source": [ "# Demonstrate set operations on unique letters from two words\n", - "a = set('abracadabra')\n", - "b = set('alacazam')\n", - "a # unique letters in a" + "a = set(\"abracadabra\")\n", + "b = set(\"alacazam\")\n", + "a # unique letters in a" ] }, { @@ -384,7 +384,7 @@ "metadata": {}, "outputs": [], "source": [ - "a - b # letters in a but not in b" + "a - b # letters in a but not in b" ] }, { @@ -393,7 +393,7 @@ "metadata": {}, "outputs": [], "source": [ - "a | b # letters in a or b or both" + "a | b # letters in a or b or both" ] }, { @@ -402,7 +402,7 @@ "metadata": {}, "outputs": [], "source": [ - "a & b # letters in both a and b" + "a & b # letters in both a and b" ] }, { @@ -411,7 +411,7 @@ "metadata": {}, "outputs": [], "source": [ - "a ^ b # letters in a or b but not both" + "a ^ b # letters in a or b but not both" ] }, { @@ -440,7 +440,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Define a new set and try some set methods (freestyle)\n" + "# Define a new set and try some set methods (freestyle)" ] }, { @@ -465,8 +465,8 @@ "metadata": {}, "outputs": [], "source": [ - "tel = {'jack': 4098, 'sape': 4139}\n", - "tel['guido'] = 4127\n", + "tel = {\"jack\": 4098, \"sape\": 4139}\n", + "tel[\"guido\"] = 4127\n", "tel" ] }, @@ -476,7 +476,7 @@ "metadata": {}, "outputs": [], "source": [ - "tel['jack']" + "tel[\"jack\"]" ] }, { @@ -485,7 +485,7 @@ "metadata": {}, "outputs": [], "source": [ - "del tel['sape']" + "del tel[\"sape\"]" ] }, { @@ -494,7 +494,7 @@ "metadata": {}, "outputs": [], "source": [ - "tel['irv'] = 4127\n", + "tel[\"irv\"] = 4127\n", "tel" ] }, @@ -522,7 +522,7 @@ "metadata": {}, "outputs": [], "source": [ - "'guido' in tel" + "\"guido\" in tel" ] }, { @@ -531,7 +531,7 @@ "metadata": {}, "outputs": [], "source": [ - "'jack' not in tel" + "\"jack\" not in tel" ] }, { @@ -554,13 +554,13 @@ "metadata": {}, "outputs": [], "source": [ - "def fib(n): # write Fibonacci series up to n\n", - " \"\"\"Print a Fibonacci series up to n.\"\"\"\n", - " a, b = 0, 1 # two assignments in one line\n", - " while a < n:\n", - " print(a, end=' ')\n", - " a, b = b, a+b # two assignments in one line\n", - " print()" + "def fib(n): # write Fibonacci series up to n\n", + " \"\"\"Print a Fibonacci series up to n.\"\"\"\n", + " a, b = 0, 1 # two assignments in one line\n", + " while a < n:\n", + " print(a, end=\" \")\n", + " a, b = b, a + b # two assignments in one line\n", + " print()" ] }, { @@ -587,7 +587,7 @@ "outputs": [], "source": [ "print(fib)\n", - "print(type(fib)) # function type\n", + "print(type(fib)) # function type\n", "f = fib\n", "f(100)" ] @@ -608,15 +608,16 @@ "def dummy(x):\n", " x += x\n", "\n", + "\n", "xx = 5\n", - "print('xx before function call: ', xx)\n", 
+ "print(\"xx before function call: \", xx)\n", "dummy(xx)\n", - "print('xx after function call: ', xx)\n", + "print(\"xx after function call: \", xx)\n", "\n", "yy = [5]\n", - "print('yy before function call: ', yy)\n", + "print(\"yy before function call: \", yy)\n", "dummy(yy)\n", - "print('yy after function call: ', yy)" + "print(\"yy after function call: \", yy)" ] }, { @@ -634,16 +635,16 @@ "metadata": {}, "outputs": [], "source": [ - "def ask_ok(prompt, retries=4, reminder='Please try again!'):\n", + "def ask_ok(prompt, retries=4, reminder=\"Please try again!\"):\n", " while True:\n", " ok = input(prompt)\n", - " if ok in ('y', 'ye', 'yes'):\n", + " if ok in (\"y\", \"ye\", \"yes\"):\n", " return True\n", - " if ok in ('n', 'no', 'nop', 'nope'):\n", + " if ok in (\"n\", \"no\", \"nop\", \"nope\"):\n", " return False\n", " retries = retries - 1\n", " if retries < 0:\n", - " raise ValueError('invalid user response')\n", + " raise ValueError(\"invalid user response\")\n", " print(reminder)" ] }, @@ -653,10 +654,10 @@ "metadata": {}, "outputs": [], "source": [ - "#This function can be called in several ways:\n", + "# This function can be called in several ways:\n", "\n", - "#giving only the mandatory argument:\n", - "ask_ok('Do you really want to quit?')\n" + "# giving only the mandatory argument:\n", + "ask_ok(\"Do you really want to quit?\")" ] }, { @@ -666,7 +667,7 @@ "outputs": [], "source": [ "# giving one of the optional arguments:\n", - "ask_ok('OK to overwrite the file?', 2)\n" + "ask_ok(\"OK to overwrite the file?\", 2)" ] }, { @@ -676,7 +677,7 @@ "outputs": [], "source": [ "# or even giving all arguments:\n", - "ask_ok('OK to overwrite the file?', 2, 'Come on, only yes or no!')" + "ask_ok(\"OK to overwrite the file?\", 2, \"Come on, only yes or no!\")" ] }, { @@ -692,7 +693,7 @@ "metadata": {}, "outputs": [], "source": [ - "ask_ok('OK to overwrite the file?', reminder='Come on, only yes or no!')" + "ask_ok(\"OK to overwrite the file?\", reminder=\"Come on, only yes or no!\")" ] }, { @@ -710,9 +711,11 @@ "source": [ "def test(x=None):\n", " if x is None:\n", - " print('no x here')\n", + " print(\"no x here\")\n", " else:\n", " print(x)\n", + "\n", + "\n", "test()" ] }, @@ -736,15 +739,15 @@ "metadata": {}, "outputs": [], "source": [ - "class Dog: # same as \"class Dog(object)\"\n", + "class Dog: # same as \"class Dog(object)\"\n", "\n", - " kind = 'canine' # class variable shared by all instances\n", + " kind = \"canine\" # class variable shared by all instances\n", "\n", - " def __init__(self, name): # initialization method\n", - " self.name = name # instance variable unique to each instance\n", - " self.tricks = [] # creates a new empty list for each dog\n", + " def __init__(self, name): # initialization method\n", + " self.name = name # instance variable unique to each instance\n", + " self.tricks = [] # creates a new empty list for each dog\n", "\n", - " def add_trick(self, trick): # class method\n", + " def add_trick(self, trick): # class method\n", " self.tricks.append(trick)" ] }, @@ -761,7 +764,9 @@ "metadata": {}, "outputs": [], "source": [ - "d = Dog('Fido') # creates a new instance of the class and assigns this object to the local variable d\n", + "d = Dog(\n", + " \"Fido\"\n", + ") # creates a new instance of the class and assigns this object to the local variable d\n", "d.name" ] }, @@ -771,9 +776,11 @@ "metadata": {}, "outputs": [], "source": [ - "e = Dog('Buddy') # creates a new instance of the class and assigns this object to the local variable e\n", - 
"d.add_trick('roll over')\n", - "e.add_trick('play dead')" + "e = Dog(\n", + " \"Buddy\"\n", + ") # creates a new instance of the class and assigns this object to the local variable e\n", + "d.add_trick(\"roll over\")\n", + "e.add_trick(\"play dead\")" ] }, { @@ -782,7 +789,7 @@ "metadata": {}, "outputs": [], "source": [ - "d.tricks # unique to d" + "d.tricks # unique to d" ] }, { @@ -791,7 +798,7 @@ "metadata": {}, "outputs": [], "source": [ - "e.tricks # unique to e" + "e.tricks # unique to e" ] }, { @@ -800,7 +807,7 @@ "metadata": {}, "outputs": [], "source": [ - "d.kind # shared by all dogs" + "d.kind # shared by all dogs" ] }, { @@ -809,7 +816,7 @@ "metadata": {}, "outputs": [], "source": [ - "e.kind # shared by all dogs" + "e.kind # shared by all dogs" ] }, { @@ -831,19 +838,22 @@ "metadata": {}, "outputs": [], "source": [ - "class Animal: # base class\n", + "class Animal: # base class\n", "\n", " def __init__(self, kind):\n", " self.kind = kind\n", " self.tricks = []\n", "\n", - " def add_trick(self, trick): # class method\n", + " def add_trick(self, trick): # class method\n", " self.tricks.append(trick)\n", "\n", - "class Dog(Animal): # derived class\n", "\n", - " def __init__(self): # override of __init__ base method\n", - " super(Dog, self).__init__('canine') # call Animal __init__ method with input string" + "class Dog(Animal): # derived class\n", + "\n", + " def __init__(self): # override of __init__ base method\n", + " super(Dog, self).__init__(\n", + " \"canine\"\n", + " ) # call Animal __init__ method with input string" ] }, { @@ -852,9 +862,9 @@ "metadata": {}, "outputs": [], "source": [ - "fido = Dog() # fido is automatically an animal of kind 'canine'\n", + "fido = Dog() # fido is automatically an animal of kind 'canine'\n", "print(fido.kind)\n", - "fido.add_trick('play dead') # Dog class can use Animal class\n", + "fido.add_trick(\"play dead\") # Dog class can use Animal class\n", "print(fido.tricks)" ] }, @@ -893,7 +903,8 @@ " for item in iterable:\n", " self.items_list.append(item)\n", "\n", - " __update = update # private copy of original update() method\n", + " __update = update # private copy of original update() method\n", + "\n", "\n", "class MappingSubclass(Mapping):\n", "\n", diff --git a/doc/tutorial/1_main_climada.ipynb b/doc/tutorial/1_main_climada.ipynb index 730d5e5ed1..36ce87bb2e 100644 --- a/doc/tutorial/1_main_climada.ipynb +++ b/doc/tutorial/1_main_climada.ipynb @@ -182,10 +182,13 @@ "source": [ "import numpy as np\n", "from climada.hazard import TCTracks\n", - "import warnings # To hide the warnings\n", - "warnings.filterwarnings('ignore')\n", + "import warnings # To hide the warnings\n", "\n", - "tracks = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA') # Here we download the full dataset for the analysis\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "tracks = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\"\n", + ") # Here we download the full dataset for the analysis\n", "# afterwards (e.g. return period), but you can also use \"year_range\" to adjust the range of the dataset to be downloaded.\n", "# While doing that, you need to make sure that the year 2017 is included if you want to run the blocks with the codes\n", "# subsetting a specific tropic cyclone, which happened in 2017. (Of course, you can also change the subsetting codes.)" @@ -220,8 +223,10 @@ ], "source": [ "# plotting tracks can be very time consuming, depending on the number of tracks. 
So we choose only a few here, by limiting the time range to one year\n", - "tracks_2017 = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA', year_range = (2017, 2017))\n", - "tracks_2017 .plot(); # This may take a very long time" + "tracks_2017 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\", year_range=(2017, 2017)\n", + ")\n", + "tracks_2017.plot(); # This may take a very long time" ] }, { @@ -368,7 +373,9 @@ } ], "source": [ - "tracks.subset({\"sid\": \"2017260N12310\"}).plot(); # This is how we subset a TCTracks object" + "tracks.subset(\n", + " {\"sid\": \"2017260N12310\"}\n", + ").plot(); # This is how we subset a TCTracks object" ] }, { @@ -397,7 +404,7 @@ } ], "source": [ - "haz.plot_intensity(event='2017260N12310');" + "haz.plot_intensity(event=\"2017260N12310\");" ] }, { @@ -433,7 +440,7 @@ } ], "source": [ - "haz.plot_rp_intensity(return_periods=(5,10,20,40));" + "haz.plot_rp_intensity(return_periods=(5, 10, 20, 40));" ] }, { @@ -553,8 +560,10 @@ "source": [ "from climada.entity.exposures import LitPop\n", "\n", - "exp_litpop = LitPop.from_countries('Puerto Rico', res_arcsec = 120) # We'll go lower resolution than default to keep it simple\n", - "exp_litpop.set_geometry_points() # Set geodataframe geometries from lat lon data\n", + "exp_litpop = LitPop.from_countries(\n", + " \"Puerto Rico\", res_arcsec=120\n", + ") # We'll go lower resolution than default to keep it simple\n", + "exp_litpop.set_geometry_points() # Set geodataframe geometries from lat lon data\n", "\n", "exp_litpop.plot_hexbin(pop_name=True, linewidth=4, buffer=0.1);" ] @@ -647,7 +656,7 @@ } ], "source": [ - "exp_litpop.gdf['impf_TC'] = 1" + "exp_litpop.gdf[\"impf_TC\"] = 1" ] }, { @@ -688,8 +697,8 @@ "from climada.entity import Measure, MeasureSet\n", "\n", "meas_mangrove = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([0.2, 0.2, 0.7]),\n", " cost=500000000,\n", " mdd_impact=(1, 0),\n", @@ -762,11 +771,13 @@ } ], "source": [ - "mangrove_exp, mangrove_imp_fun_set, mangrove_haz = meas_mangrove.apply(exp_litpop, imp_fun_set, haz)\n", + "mangrove_exp, mangrove_imp_fun_set, mangrove_haz = meas_mangrove.apply(\n", + " exp_litpop, imp_fun_set, haz\n", + ")\n", "axes1 = imp_fun_set.plot()\n", - "axes1.set_title('TC: Emanuel (2011) impact function')\n", + "axes1.set_title(\"TC: Emanuel (2011) impact function\")\n", "axes2 = mangrove_imp_fun_set.plot()\n", - "axes2.set_title('TC: Modified impact function')" + "axes2.set_title(\"TC: Modified impact function\")" ] }, { @@ -792,8 +803,8 @@ ], "source": [ "meas_buildings = Measure(\n", - " name='Building code',\n", - " haz_type='TC',\n", + " name=\"Building code\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([0.2, 0.7, 0.5]),\n", " cost=100000000,\n", " hazard_freq_cutoff=0.1,\n", @@ -802,7 +813,9 @@ "meas_set.append(meas_buildings)\n", "meas_set.check()\n", "\n", - "buildings_exp, buildings_imp_fun_set, buildings_haz = meas_buildings.apply(exp_litpop, imp_fun_set, haz)" + "buildings_exp, buildings_imp_fun_set, buildings_haz = meas_buildings.apply(\n", + " exp_litpop, imp_fun_set, haz\n", + ")" ] }, { @@ -861,7 +874,7 @@ } ], "source": [ - "haz.plot_rp_intensity(return_periods=(5, 20));\n", + "haz.plot_rp_intensity(return_periods=(5, 20))\n", "buildings_haz.plot_rp_intensity(return_periods=(5, 20));" ] }, @@ -906,8 +919,8 @@ "source": [ "from climada.entity import DiscRates\n", "\n", - "years=np.arange(1950, 2101)\n", - "rates=np.ones(years.size) * 
0.02\n", + "years = np.arange(1950, 2101)\n", + "rates = np.ones(years.size) * 0.02\n", "disc = DiscRates(years=years, rates=rates)\n", "disc.check()\n", "disc.plot()" @@ -941,7 +954,7 @@ " exposures=exp_litpop,\n", " disc_rates=disc,\n", " impact_func_set=imp_fun_set,\n", - " measure_set=meas_set\n", + " measure_set=meas_set,\n", ")" ] }, @@ -1030,10 +1043,10 @@ } ], "source": [ - "freq_curve = imp.calc_freq_curve() # impact exceedance frequency curve\n", - "freq_curve.plot();\n", + "freq_curve = imp.calc_freq_curve() # impact exceedance frequency curve\n", + "freq_curve.plot()\n", "\n", - "print('Expected average annual impact: {:.3e} USD'.format(imp.aai_agg))" + "print(\"Expected average annual impact: {:.3e} USD\".format(imp.aai_agg))" ] }, { @@ -1071,7 +1084,7 @@ } ], "source": [ - "imp.plot_basemap_eai_exposure(buffer=0.1); # average annual impact at each exposure" + "imp.plot_basemap_eai_exposure(buffer=0.1); # average annual impact at each exposure" ] }, { @@ -1186,9 +1199,12 @@ "from climada.engine import CostBenefit\n", "\n", "cost_ben = CostBenefit()\n", - "cost_ben.calc(haz, ent, future_year=2040) # prints costs and benefits\n", - "cost_ben.plot_cost_benefit(); # plot cost benefit ratio and averted damage of every exposure\n", - "cost_ben.plot_event_view(return_per=(10, 20, 40)); # plot averted damage of each measure for every return period" + "cost_ben.calc(haz, ent, future_year=2040) # prints costs and benefits\n", + "cost_ben.plot_cost_benefit()\n", + "# plot cost benefit ratio and averted damage of every exposure\n", + "cost_ben.plot_event_view(\n", + " return_per=(10, 20, 40)\n", + "); # plot averted damage of each measure for every return period" ] }, { diff --git a/doc/tutorial/climada_engine_CostBenefit.ipynb b/doc/tutorial/climada_engine_CostBenefit.ipynb index 514bceb9e0..de98c79260 100644 --- a/doc/tutorial/climada_engine_CostBenefit.ipynb +++ b/doc/tutorial/climada_engine_CostBenefit.ipynb @@ -257,15 +257,23 @@ "\n", "client = Client()\n", "future_year = 2080\n", - "haz_present = client.get_hazard('tropical_cyclone',\n", - " properties={'country_name': 'Haiti',\n", - " 'climate_scenario': 'historical',\n", - " 'nb_synth_tracks':'10'})\n", - "haz_future = client.get_hazard('tropical_cyclone',\n", - " properties={'country_name': 'Haiti',\n", - " 'climate_scenario': 'rcp60',\n", - " 'ref_year': str(future_year),\n", - " 'nb_synth_tracks':'10'})\n" + "haz_present = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"historical\",\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")\n", + "haz_future = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"rcp60\",\n", + " \"ref_year\": str(future_year),\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")" ] }, { @@ -366,7 +374,7 @@ } ], "source": [ - "exp_present = client.get_litpop(country='Haiti')" + "exp_present = client.get_litpop(country=\"Haiti\")" ] }, { @@ -388,8 +396,8 @@ "exp_future.ref_year = future_year\n", "n_years = exp_future.ref_year - exp_present.ref_year + 1\n", "growth_rate = 1.02\n", - "growth = growth_rate ** n_years\n", - "exp_future.gdf['value'] = exp_future.gdf['value'] * growth" + "growth = growth_rate**n_years\n", + "exp_future.gdf[\"value\"] = exp_future.gdf[\"value\"] * growth" ] }, { @@ -517,8 +525,8 @@ "source": [ "# This would be done automatically in Impact calculations\n", "# but it's better to do it explicitly 
before the calculation\n", - "exp_present.assign_centroids(haz_present, distance='approx')\n", - "exp_future.assign_centroids(haz_future, distance='approx')" + "exp_present.assign_centroids(haz_present, distance=\"approx\")\n", + "exp_future.assign_centroids(haz_future, distance=\"approx\")" ] }, { @@ -592,9 +600,9 @@ "# This is more out of politeness, since if there's only one impact function\n", "# and one `impf_` column, CLIMADA can figure it out\n", "exp_present.gdf.rename(columns={\"impf_\": \"impf_TC\"}, inplace=True)\n", - "exp_present.gdf['impf_TC'] = 1\n", + "exp_present.gdf[\"impf_TC\"] = 1\n", "exp_future.gdf.rename(columns={\"impf_\": \"impf_TC\"}, inplace=True)\n", - "exp_future.gdf['impf_TC'] = 1" + "exp_future.gdf[\"impf_TC\"] = 1" ] }, { @@ -619,20 +627,20 @@ "from climada.entity.measures import Measure, MeasureSet\n", "\n", "meas_1 = Measure(\n", - " haz_type='TC',\n", - " name='Measure A',\n", + " haz_type=\"TC\",\n", + " name=\"Measure A\",\n", " color_rgb=np.array([0.8, 0.1, 0.1]),\n", " cost=5000000000,\n", - " hazard_inten_imp=(1, -5), # Decrease wind speeds by 5 m/s\n", + " hazard_inten_imp=(1, -5), # Decrease wind speeds by 5 m/s\n", " risk_transf_cover=0,\n", ")\n", "\n", "meas_2 = Measure(\n", - " haz_type='TC',\n", - " name='Measure B',\n", + " haz_type=\"TC\",\n", + " name=\"Measure B\",\n", " color_rgb=np.array([0.1, 0.1, 0.8]),\n", " cost=220000000,\n", - " paa_impact=(1, -0.10), # 10% fewer assets affected\n", + " paa_impact=(1, -0.10), # 10% fewer assets affected\n", ")\n", "\n", "# gather all measures\n", @@ -684,10 +692,18 @@ "source": [ "from climada.entity import Entity\n", "\n", - "entity_present = Entity(exposures=exp_present, disc_rates=discount_zero,\n", - " impact_func_set=impf_set, measure_set=meas_set)\n", - "entity_future = Entity(exposures=exp_future, disc_rates=discount_zero,\n", - " impact_func_set=impf_set, measure_set=meas_set)" + "entity_present = Entity(\n", + " exposures=exp_present,\n", + " disc_rates=discount_zero,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")\n", + "entity_future = Entity(\n", + " exposures=exp_future,\n", + " disc_rates=discount_zero,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")" ] }, { @@ -737,8 +753,16 @@ "from climada.engine.cost_benefit import risk_aai_agg\n", "\n", "costben_measures_only = CostBenefit()\n", - "costben_measures_only.calc(haz_present, entity_present, haz_future=None, ent_future=None,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=None, save_imp=True)" + "costben_measures_only.calc(\n", + " haz_present,\n", + " entity_present,\n", + " haz_future=None,\n", + " ent_future=None,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=None,\n", + " save_imp=True,\n", + ")" ] }, { @@ -783,10 +807,12 @@ } ], "source": [ - "combined_costben = costben_measures_only.combine_measures(['Measure A', 'Measure B'],\n", - " 'Combined measures',\n", - " new_color=np.array([0.1, 0.8, 0.8]),\n", - " disc_rates=discount_zero)" + "combined_costben = costben_measures_only.combine_measures(\n", + " [\"Measure A\", \"Measure B\"],\n", + " \"Combined measures\",\n", + " new_color=np.array([0.1, 0.8, 0.8]),\n", + " disc_rates=discount_zero,\n", + ")" ] }, { @@ -877,8 +903,16 @@ ], "source": [ "costben = CostBenefit()\n", - "costben.calc(haz_present, entity_present, haz_future=haz_future, ent_future=entity_future,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=1, save_imp=True)" + 
"costben.calc(\n", + " haz_present,\n", + " entity_present,\n", + " haz_future=haz_future,\n", + " ent_future=entity_future,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=1,\n", + " save_imp=True,\n", + ")" ] }, { @@ -939,8 +973,10 @@ "source": [ "# define this as a function because we'll use it again later\n", "def waterfall():\n", - " return costben.plot_waterfall(haz_present, entity_present, haz_future, entity_future,\n", - " risk_func=risk_aai_agg)\n", + " return costben.plot_waterfall(\n", + " haz_present, entity_present, haz_future, entity_future, risk_func=risk_aai_agg\n", + " )\n", + "\n", "\n", "ax = waterfall()" ] @@ -992,8 +1028,15 @@ } ], "source": [ - "costben.plot_arrow_averted(axis = waterfall(), in_meas_names=['Measure A', 'Measure B'], accumulate=True, combine=False,\n", - " risk_func=risk_aai_agg, disc_rates=None, imp_time_depen=1)" + "costben.plot_arrow_averted(\n", + " axis=waterfall(),\n", + " in_meas_names=[\"Measure A\", \"Measure B\"],\n", + " accumulate=True,\n", + " combine=False,\n", + " risk_func=risk_aai_agg,\n", + " disc_rates=None,\n", + " imp_time_depen=1,\n", + ")" ] }, { @@ -1025,10 +1068,18 @@ }, "outputs": [], "source": [ - "entity_present_disc = Entity(exposures=exp_present, disc_rates=discount_stern,\n", - " impact_func_set=impf_set, measure_set=meas_set)\n", - "entity_future_disc = Entity(exposures=exp_future, disc_rates=discount_stern,\n", - " impact_func_set=impf_set, measure_set=meas_set)" + "entity_present_disc = Entity(\n", + " exposures=exp_present,\n", + " disc_rates=discount_stern,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")\n", + "entity_future_disc = Entity(\n", + " exposures=exp_future,\n", + " disc_rates=discount_stern,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")" ] }, { @@ -1083,9 +1134,17 @@ ], "source": [ "costben_disc = CostBenefit()\n", - "costben_disc.calc(haz_present, entity_present_disc, haz_future=haz_future, ent_future=entity_future_disc,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=1, save_imp=True)\n", - "print(costben_disc.imp_meas_future['no measure']['impact'].imp_mat.shape)" + "costben_disc.calc(\n", + " haz_present,\n", + " entity_present_disc,\n", + " haz_future=haz_future,\n", + " ent_future=entity_future_disc,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=1,\n", + " save_imp=True,\n", + ")\n", + "print(costben_disc.imp_meas_future[\"no measure\"][\"impact\"].imp_mat.shape)" ] }, { @@ -1194,18 +1253,22 @@ } ], "source": [ - "combined_costben_disc = costben_disc.combine_measures(['Measure A', 'Measure B'],\n", - " 'Combined measures',\n", - " new_color=np.array([0.1, 0.8, 0.8]),\n", - " disc_rates=discount_stern)\n", - "efc_present = costben_disc.imp_meas_present['no measure']['efc']\n", - "efc_future = costben_disc.imp_meas_future['no measure']['efc']\n", - "efc_combined_measures = combined_costben_disc.imp_meas_future['Combined measures']['efc']\n", + "combined_costben_disc = costben_disc.combine_measures(\n", + " [\"Measure A\", \"Measure B\"],\n", + " \"Combined measures\",\n", + " new_color=np.array([0.1, 0.8, 0.8]),\n", + " disc_rates=discount_stern,\n", + ")\n", + "efc_present = costben_disc.imp_meas_present[\"no measure\"][\"efc\"]\n", + "efc_future = costben_disc.imp_meas_future[\"no measure\"][\"efc\"]\n", + "efc_combined_measures = combined_costben_disc.imp_meas_future[\"Combined measures\"][\n", + " \"efc\"\n", + "]\n", "\n", "ax = 
plt.subplot(1, 1, 1)\n", - "efc_present.plot(axis=ax, color='blue', label='Present')\n", - "efc_future.plot(axis=ax, color='orange', label='Future, unadapted')\n", - "efc_combined_measures.plot(axis=ax, color='green', label='Future, adapted')\n", + "efc_present.plot(axis=ax, color=\"blue\", label=\"Present\")\n", + "efc_future.plot(axis=ax, color=\"orange\", label=\"Future, unadapted\")\n", + "efc_combined_measures.plot(axis=ax, color=\"green\", label=\"Future, adapted\")\n", "leg = ax.legend()" ] }, diff --git a/doc/tutorial/climada_engine_Forecast.ipynb b/doc/tutorial/climada_engine_Forecast.ipynb index 74cbd00f85..29c9a5930f 100644 --- a/doc/tutorial/climada_engine_Forecast.ipynb +++ b/doc/tutorial/climada_engine_Forecast.ipynb @@ -42,12 +42,12 @@ "metadata": {}, "outputs": [], "source": [ - "#generate hazard\n", + "# generate hazard\n", "hazard, haz_model, run_datetime, event_date = generate_WS_forecast_hazard()\n", "# generate hazard with forecasts from past dates (works only if the files have already been downloaded)\n", "# hazard, haz_model, run_datetime, event_date = generate_WS_forecast_hazard(\n", "# run_datetime=datetime(2022,5,17),\n", - "# event_date=datetime(2022,5,19)) " + "# event_date=datetime(2022,5,19))" ] }, { @@ -56,7 +56,7 @@ "metadata": {}, "outputs": [], "source": [ - "#generate vulnerability\n", + "# generate vulnerability\n", "impact_function = ImpfStormEurope.from_welker()\n", "impact_function_set = ImpactFuncSet([impact_function])" ] @@ -67,12 +67,12 @@ "metadata": {}, "outputs": [], "source": [ - "#generate exposure and save to file\n", - "filename_exp = CONFIG.local_data.save_dir.dir() / ('exp_litpop_Switzerland.hdf5')\n", + "# generate exposure and save to file\n", + "filename_exp = CONFIG.local_data.save_dir.dir() / (\"exp_litpop_Switzerland.hdf5\")\n", "if filename_exp.exists():\n", " exposure = LitPop.from_hdf5(filename_exp)\n", "else:\n", - " exposure = LitPop.from_countries('Switzerland', reference_year=2020)\n", + " exposure = LitPop.from_countries(\"Switzerland\", reference_year=2020)\n", " exposure.write_hdf5(filename_exp)" ] }, @@ -82,7 +82,7 @@ "metadata": {}, "outputs": [], "source": [ - "#create and calculate Forecast\n", + "# create and calculate Forecast\n", "CH_WS_forecast = Forecast({run_datetime: hazard}, exposure, impact_function_set)\n", "CH_WS_forecast.calc()" ] @@ -106,7 +106,7 @@ } ], "source": [ - "CH_WS_forecast.plot_imp_map(save_fig=False,close_fig=False,proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_imp_map(save_fig=False, close_fig=False, proj=ccrs.epsg(2056));" ] }, { @@ -135,7 +135,7 @@ } ], "source": [ - "CH_WS_forecast.plot_hist(save_fig=False,close_fig=False);" + "CH_WS_forecast.plot_hist(save_fig=False, close_fig=False);" ] }, { @@ -164,7 +164,9 @@ } ], "source": [ - "CH_WS_forecast.plot_exceedence_prob(threshold=5000, save_fig=False, close_fig=False,proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_exceedence_prob(\n", + " threshold=5000, save_fig=False, close_fig=False, proj=ccrs.epsg(2056)\n", + ");" ] }, { @@ -198,31 +200,30 @@ "from climada.util.config import CONFIG\n", "\n", "\n", - "#create a file containing the polygons of Swiss cantons using natural earth\n", - "cantons_file = CONFIG.local_data.save_dir.dir() / 'cantons.shp'\n", - "adm1_shape_file = shapereader.natural_earth(resolution='10m',\n", - " category='cultural',\n", - " name='admin_1_states_provinces')\n", + "# create a file containing the polygons of Swiss cantons using natural earth\n", + "cantons_file = CONFIG.local_data.save_dir.dir() / \"cantons.shp\"\n", + 
"adm1_shape_file = shapereader.natural_earth(\n", + " resolution=\"10m\", category=\"cultural\", name=\"admin_1_states_provinces\"\n", + ")\n", "if not cantons_file.exists():\n", - " with fiona.open(adm1_shape_file, 'r') as source:\n", - " with fiona.open(\n", - " cantons_file, 'w',\n", - " **source.meta) as sink:\n", + " with fiona.open(adm1_shape_file, \"r\") as source:\n", + " with fiona.open(cantons_file, \"w\", **source.meta) as sink:\n", "\n", " for f in source:\n", - " if f['properties']['adm0_a3'] == 'CHE':\n", + " if f[\"properties\"][\"adm0_a3\"] == \"CHE\":\n", " sink.write(f)\n", - "CH_WS_forecast.plot_warn_map(str(cantons_file),\n", - " decision_level = 'polygon',\n", - " thresholds=[100000,500000,\n", - " 1000000,5000000],\n", - " probability_aggregation='mean',\n", - " area_aggregation='sum',\n", - " title=\"Building damage warning\",\n", - " explain_text=\"warn level based on aggregated damages\",\n", - " save_fig=False,\n", - " close_fig=False,\n", - " proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_warn_map(\n", + " str(cantons_file),\n", + " decision_level=\"polygon\",\n", + " thresholds=[100000, 500000, 1000000, 5000000],\n", + " probability_aggregation=\"mean\",\n", + " area_aggregation=\"sum\",\n", + " title=\"Building damage warning\",\n", + " explain_text=\"warn level based on aggregated damages\",\n", + " save_fig=False,\n", + " close_fig=False,\n", + " proj=ccrs.epsg(2056),\n", + ");" ] }, { @@ -255,43 +256,43 @@ "\n", "### generate exposure\n", "# find out which hazard coord to consider\n", - "CHE_borders = u_plot._get_borders(np.stack([exposure.gdf['latitude'].values,\n", - " exposure.gdf['longitude'].values],\n", - " axis=1)\n", - " )\n", - "centroid_selection = np.logical_and(np.logical_and(hazard.centroids.lat >= CHE_borders[2],\n", - " hazard.centroids.lat <= CHE_borders[3]),\n", - " np.logical_and(hazard.centroids.lon >= CHE_borders[0],\n", - " hazard.centroids.lon <= CHE_borders[1])\n", - " )\n", + "CHE_borders = u_plot._get_borders(\n", + " np.stack(\n", + " [exposure.gdf[\"latitude\"].values, exposure.gdf[\"longitude\"].values], axis=1\n", + " )\n", + ")\n", + "centroid_selection = np.logical_and(\n", + " np.logical_and(\n", + " hazard.centroids.lat >= CHE_borders[2], hazard.centroids.lat <= CHE_borders[3]\n", + " ),\n", + " np.logical_and(\n", + " hazard.centroids.lon >= CHE_borders[0], hazard.centroids.lon <= CHE_borders[1]\n", + " ),\n", + ")\n", "# Fill DataFrame with values for a \"neutral\" exposure (value = 1)\n", "\n", "exp_df = DataFrame()\n", - "exp_df['value'] = np.ones_like(hazard.centroids.lat[centroid_selection]) # provide value\n", - "exp_df['latitude'] = hazard.centroids.lat[centroid_selection]\n", - "exp_df['longitude'] = hazard.centroids.lon[centroid_selection]\n", - "exp_df['impf_WS'] = np.ones_like(hazard.centroids.lat[centroid_selection], int)\n", + "exp_df[\"value\"] = np.ones_like(\n", + " hazard.centroids.lat[centroid_selection]\n", + ") # provide value\n", + "exp_df[\"latitude\"] = hazard.centroids.lat[centroid_selection]\n", + "exp_df[\"longitude\"] = hazard.centroids.lon[centroid_selection]\n", + "exp_df[\"impf_WS\"] = np.ones_like(hazard.centroids.lat[centroid_selection], int)\n", "# Generate Exposures\n", "exp = Exposures(exp_df)\n", "exp.check()\n", - "exp.value_unit = 'warn_level'\n", + "exp.value_unit = \"warn_level\"\n", "\n", "### generate impact functions\n", "## impact functions for hazard based warnings\n", - "haz_type = 'WS'\n", + "haz_type = \"WS\"\n", "idx = 1\n", - "name = 'warn_level_low_elevation'\n", - 
"intensity_unit = 'm/s'\n", - "intensity = np.array([0.0, 19.439, \n", - " 19.44, 24.999, \n", - " 25.0, 30.549, \n", - " 30.55, 38.879, \n", - " 38.88, 100.0])\n", - "mdd = np.array([1.0, 1.0, \n", - " 2.0, 2.0, \n", - " 3.0, 3.0, \n", - " 4.0, 4.0, \n", - " 5.0, 5.0])\n", + "name = \"warn_level_low_elevation\"\n", + "intensity_unit = \"m/s\"\n", + "intensity = np.array(\n", + " [0.0, 19.439, 19.44, 24.999, 25.0, 30.549, 30.55, 38.879, 38.88, 100.0]\n", + ")\n", + "mdd = np.array([1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0])\n", "paa = np.ones_like(mdd)\n", "imp_fun_low = ImpactFunc(haz_type, idx, intensity, mdd, paa, intensity_unit, name)\n", "imp_fun_low.check()\n", @@ -305,7 +306,7 @@ "metadata": {}, "outputs": [], "source": [ - "#create and calculate Forecast\n", + "# create and calculate Forecast\n", "warn_forecast = Forecast({run_datetime: hazard}, exp, impf_set)\n", "warn_forecast.calc()" ] @@ -336,16 +337,18 @@ } ], "source": [ - "warn_forecast.plot_warn_map(cantons_file,\n", - " thresholds=[2,3,4,5],\n", - " decision_level = 'exposure_point',\n", - " probability_aggregation=0.5,\n", - " area_aggregation=0.5,\n", - " title=\"DWD ICON METEOROLOGICAL WARNING\",\n", - " explain_text=\"warn level based on wind gust thresholds\",\n", - " save_fig=False,\n", - " close_fig=False,\n", - " proj=ccrs.epsg(2056));" + "warn_forecast.plot_warn_map(\n", + " cantons_file,\n", + " thresholds=[2, 3, 4, 5],\n", + " decision_level=\"exposure_point\",\n", + " probability_aggregation=0.5,\n", + " area_aggregation=0.5,\n", + " title=\"DWD ICON METEOROLOGICAL WARNING\",\n", + " explain_text=\"warn level based on wind gust thresholds\",\n", + " save_fig=False,\n", + " close_fig=False,\n", + " proj=ccrs.epsg(2056),\n", + ");" ] }, { @@ -390,4 +393,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/doc/tutorial/climada_engine_Impact.ipynb b/doc/tutorial/climada_engine_Impact.ipynb index bbe55afd68..b6ea21cd89 100644 --- a/doc/tutorial/climada_engine_Impact.ipynb +++ b/doc/tutorial/climada_engine_Impact.ipynb @@ -329,7 +329,9 @@ "from climada.entity import LitPop\n", "\n", "# Cuba with resolution 10km and financial_mode = income group.\n", - "exp_lp = LitPop.from_countries(countries=['CUB'], res_arcsec=300, fin_mode='income_group')\n", + "exp_lp = LitPop.from_countries(\n", + " countries=[\"CUB\"], res_arcsec=300, fin_mode=\"income_group\"\n", + ")\n", "exp_lp.check()" ] }, @@ -492,7 +494,7 @@ "# not needed for impact calculations\n", "# visualize the define exposure\n", "exp_lp.plot_raster()\n", - "print('\\n Raster properties exposures:', exp_lp.meta)" + "print(\"\\n Raster properties exposures:\", exp_lp.meta)" ] }, { @@ -540,13 +542,17 @@ "from climada.hazard import TCTracks, TropCyclone, Centroids\n", "\n", "# Load histrocial tropical cyclone tracks from ibtracs over the North Atlantic basin between 2010-2012\n", - "ibtracks_na = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA', year_range=(2010, 2012), correct_pres=True)\n", - "print('num tracks hist:', ibtracks_na.size)\n", + "ibtracks_na = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\", year_range=(2010, 2012), correct_pres=True\n", + ")\n", + "print(\"num tracks hist:\", ibtracks_na.size)\n", "\n", - "ibtracks_na.equal_timestep(0.5) # Interpolation to make the track smooth and to allow applying calc_perturbed_trajectories\n", + "ibtracks_na.equal_timestep(\n", + " 0.5\n", + ") # Interpolation to make the track smooth and to allow applying calc_perturbed_trajectories\n", "# 
Add randomly generated tracks using the calc_perturbed_trajectories method (1 per historical track)\n", "ibtracks_na.calc_perturbed_trajectories(nb_synth_tracks=1)\n", - "print('num tracks hist+syn:', ibtracks_na.size)" + "print(\"num tracks hist+syn:\", ibtracks_na.size)" ] }, { @@ -620,8 +626,8 @@ "outputs": [], "source": [ "# Define the centroids from the exposures position\n", - "lat = exp_lp.gdf['latitude'].values\n", - "lon = exp_lp.gdf['longitude'].values\n", + "lat = exp_lp.gdf[\"latitude\"].values\n", + "lon = exp_lp.gdf[\"longitude\"].values\n", "centrs = Centroids.from_lat_lon(lat, lon)\n", "centrs.check()" ] @@ -702,6 +708,7 @@ "outputs": [], "source": [ "from climada.entity import ImpactFuncSet, ImpfTropCyclone\n", + "\n", "# impact function TC\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "\n", @@ -865,7 +872,7 @@ "source": [ "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + haz_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.check()\n", "exp_lp.gdf.head()" ] @@ -910,7 +917,10 @@ "source": [ "# Compute impact\n", "from climada.engine import ImpactCalc\n", - "imp = ImpactCalc(exp_lp, impf_set, tc).impact(save_mat=False) # Do not save the results geographically resolved (only aggregate values)" + "\n", + "imp = ImpactCalc(exp_lp, impf_set, tc).impact(\n", + " save_mat=False\n", + ") # Do not save the results geographically resolved (only aggregate values)" ] }, { @@ -1215,25 +1225,27 @@ "from datetime import datetime, date\n", "import pandas as pd\n", "\n", - "#set a harvest date\n", - "harvest_DOY=290 #17 October\n", + "# set a harvest date\n", + "harvest_DOY = 290 # 17 October\n", "\n", - "#loop over all events an check if they happened before or after harvest\n", - "event_ids_post_harvest=[]\n", - "event_ids_pre_harvest=[]\n", + "# loop over all events an check if they happened before or after harvest\n", + "event_ids_post_harvest = []\n", + "event_ids_pre_harvest = []\n", "for event_id in tc.event_id:\n", - " event_date = tc.date[np.where(tc.event_id==event_id)[0][0]]\n", - " day_of_year = event_date - date(datetime.fromordinal(event_date).year, 1, 1).toordinal() + 1\n", + " event_date = tc.date[np.where(tc.event_id == event_id)[0][0]]\n", + " day_of_year = (\n", + " event_date - date(datetime.fromordinal(event_date).year, 1, 1).toordinal() + 1\n", + " )\n", "\n", - " if day_of_year > harvest_DOY:\n", - " event_ids_post_harvest.append(event_id)\n", - " else:\n", - " event_ids_pre_harvest.append(event_id)\n", + " if day_of_year > harvest_DOY:\n", + " event_ids_post_harvest.append(event_id)\n", + " else:\n", + " event_ids_pre_harvest.append(event_id)\n", "\n", - "tc_post_harvest=tc.select(event_id=event_ids_post_harvest)\n", - "tc_pre_harvest=tc.select(event_id=event_ids_pre_harvest)\n", - "#print('pre-harvest:', tc_pre_harvest.event_name)\n", - "#print('post-harvest:', tc_post_harvest.event_name)" + "tc_post_harvest = tc.select(event_id=event_ids_post_harvest)\n", + "tc_pre_harvest = tc.select(event_id=event_ids_pre_harvest)\n", + "# print('pre-harvest:', tc_pre_harvest.event_name)\n", + "# print('post-harvest:', tc_post_harvest.event_name)" ] }, { @@ -1285,18 +1297,19 @@ ], "source": [ "from climada.engine import Impact\n", + "\n", "# impact function TC\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "# impact function TC after harvest is by factor 0.5 smaller\n", "impf_tc_posth = ImpfTropCyclone.from_emanuel_usa()\n", - 
"impf_tc_posth.mdd = impf_tc.mdd*0.1\n", + "impf_tc_posth.mdd = impf_tc.mdd * 0.1\n", "# add the impact function to an Impact function set\n", "impf_set = ImpactFuncSet([impf_tc])\n", "impf_set_posth = ImpactFuncSet([impf_tc_posth])\n", "impf_set.check()\n", "impf_set_posth.check()\n", "\n", - "#plot\n", + "# plot\n", "impf_set.plot()\n", "impf_set_posth.plot()\n", "\n", @@ -1360,16 +1373,17 @@ ], "source": [ "# Concatenate impacts again\n", - "imp_tot = Impact.concat([imp_preh,imp_posth])\n", + "imp_tot = Impact.concat([imp_preh, imp_posth])\n", "\n", - "#plot result\n", + "# plot result\n", "import matplotlib.pyplot as plt\n", - "ax=imp_preh.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Pre-Harvest')\n", - "ax=imp_posth.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Post-Harvest')\n", - "ax=imp_tot.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Total')\n" + "\n", + "ax = imp_preh.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Pre-Harvest\")\n", + "ax = imp_posth.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Post-Harvest\")\n", + "ax = imp_tot.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Total\")" ] }, { @@ -1459,22 +1473,34 @@ "from climada.engine import ImpactCalc\n", "\n", "# Set Exposures in points\n", - "exp_pnt = Exposures(crs='epsg:4326') #set coordinate system\n", - "exp_pnt.gdf['latitude'] = np.array([21.899326, 21.960728, 22.220574, 22.298390, 21.787977, 21.787977, 21.981732])\n", - "exp_pnt.gdf['longitude'] = np.array([88.307422, 88.565362, 88.378337, 87.806356, 88.348835, 88.348835, 89.246521])\n", - "exp_pnt.gdf['value'] = np.array([1.0e5, 1.2e5, 1.1e5, 1.1e5, 2.0e5, 2.5e5, 0.5e5])\n", + "exp_pnt = Exposures(crs=\"epsg:4326\") # set coordinate system\n", + "exp_pnt.gdf[\"latitude\"] = np.array(\n", + " [21.899326, 21.960728, 22.220574, 22.298390, 21.787977, 21.787977, 21.981732]\n", + ")\n", + "exp_pnt.gdf[\"longitude\"] = np.array(\n", + " [88.307422, 88.565362, 88.378337, 87.806356, 88.348835, 88.348835, 89.246521]\n", + ")\n", + "exp_pnt.gdf[\"value\"] = np.array([1.0e5, 1.2e5, 1.1e5, 1.1e5, 2.0e5, 2.5e5, 0.5e5])\n", "exp_pnt.check()\n", "exp_pnt.plot_scatter(buffer=0.05)\n", "\n", "# Set Hazard in Exposures points\n", "# set centroids from exposures coordinates\n", - "centr_pnt = Centroids.from_lat_lon(exp_pnt.gdf['latitude'].values, exp_pnt.gdf['longitude'].values, exp_pnt.crs)\n", + "centr_pnt = Centroids.from_lat_lon(\n", + " exp_pnt.gdf[\"latitude\"].values, exp_pnt.gdf[\"longitude\"].values, exp_pnt.crs\n", + ")\n", "# compute Hazard in that centroids\n", - "tr_pnt = TCTracks.from_ibtracs_netcdf(storm_id='2007314N10093')\n", + "tr_pnt = TCTracks.from_ibtracs_netcdf(storm_id=\"2007314N10093\")\n", "tc_pnt = TropCyclone.from_tracks(tr_pnt, centroids=centr_pnt)\n", "tc_pnt.check()\n", - "ax_pnt = tc_pnt.centroids.plot(c=np.array(tc_pnt.intensity[0,:].todense()).squeeze()) # plot intensity per point\n", - "ax_pnt.get_figure().colorbar(ax_pnt.collections[0], fraction=0.0175, pad=0.02).set_label('Intensity (m/s)') # add colorbar\n", + "ax_pnt = tc_pnt.centroids.plot(\n", + " c=np.array(tc_pnt.intensity[0, :].todense()).squeeze()\n", + ") # plot intensity per point\n", + "ax_pnt.get_figure().colorbar(\n", + " 
ax_pnt.collections[0], fraction=0.0175, pad=0.02\n", + ").set_label(\n", + " \"Intensity (m/s)\"\n", + ") # add colorbar\n", "\n", "# Set impact function\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", @@ -1486,13 +1512,16 @@ "[haz_id] = impf_set.get_ids()[haz_type]\n", "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + haz_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.gdf.head()\n", "\n", "# Compute Impact\n", "imp_pnt = ImpactCalc(exp_pnt, impf_pnt, tc_pnt).impact()\n", "# nearest neighbor of exposures to centroids gives identity\n", - "print('Nearest neighbor hazard.centroids indexes for each exposure:', exp_pnt.gdf['centr_TC'].values)\n", + "print(\n", + " \"Nearest neighbor hazard.centroids indexes for each exposure:\",\n", + " exp_pnt.gdf[\"centr_TC\"].values,\n", + ")\n", "imp_pnt.plot_scatter_eai_exposure(ignore_zero=False, buffer=0.05);" ] }, @@ -1680,24 +1709,32 @@ "from climada.util.constants import HAZ_DEMO_FL\n", "\n", "# Exposures belonging to a raster (the raser information is contained in the meta attribute)\n", - "exp_ras = LitPop.from_countries(countries=['VEN'], res_arcsec=300, fin_mode='income_group')\n", + "exp_ras = LitPop.from_countries(\n", + " countries=[\"VEN\"], res_arcsec=300, fin_mode=\"income_group\"\n", + ")\n", "exp_ras.gdf.reset_index()\n", "exp_ras.check()\n", "exp_ras.plot_raster()\n", - "print('\\n Raster properties exposures:', exp_ras.meta)\n", + "print(\"\\n Raster properties exposures:\", exp_ras.meta)\n", "\n", "# Initialize hazard object with haz_type = 'FL' (for Flood)\n", - "hazard_type='FL'\n", + "hazard_type = \"FL\"\n", "# Load a previously generated (either with CLIMADA or other means) hazard\n", "# from file (HAZ_DEMO_FL) and resample the hazard raster to the exposures' ones\n", "# Hint: check how other resampling methods affect to final impact\n", - "haz_ras = Hazard.from_raster([HAZ_DEMO_FL], haz_type=hazard_type, dst_crs=exp_ras.meta['crs'], transform=exp_ras.meta['transform'],\n", - " width=exp_ras.meta['width'], height=exp_ras.meta['height'],\n", - " resampling=Resampling.nearest)\n", - "haz_ras.intensity[haz_ras.intensity==-9999] = 0 # correct no data values\n", + "haz_ras = Hazard.from_raster(\n", + " [HAZ_DEMO_FL],\n", + " haz_type=hazard_type,\n", + " dst_crs=exp_ras.meta[\"crs\"],\n", + " transform=exp_ras.meta[\"transform\"],\n", + " width=exp_ras.meta[\"width\"],\n", + " height=exp_ras.meta[\"height\"],\n", + " resampling=Resampling.nearest,\n", + ")\n", + "haz_ras.intensity[haz_ras.intensity == -9999] = 0 # correct no data values\n", "haz_ras.check()\n", "haz_ras.plot_intensity(1)\n", - "print('Raster properties centroids:', haz_ras.centroids.meta)\n", + "print(\"Raster properties centroids:\", haz_ras.centroids.meta)\n", "\n", "# Set dummy impact function\n", "intensity = np.linspace(0, 10, 100)\n", @@ -1710,13 +1747,16 @@ "\n", "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + hazard_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.gdf.head()\n", "\n", "# Compute impact\n", "imp_ras = ImpactCalc(exp_ras, impf_ras, haz_ras).impact(save_mat=False)\n", "# nearest neighbor of exposures to centroids is not identity because litpop does not contain data outside the country polygon\n", - "print('\\n Nearest neighbor hazard.centroids indexes for each exposure:', 
exp_ras.gdf['centr_FL'].values)\n", + "print(\n", + " \"\\n Nearest neighbor hazard.centroids indexes for each exposure:\",\n", + " exp_ras.gdf[\"centr_FL\"].values,\n", + ")\n", "imp_ras.plot_raster_eai_exposure();" ] }, @@ -1957,7 +1997,7 @@ "from climada_petals.entity import BlackMarble\n", "\n", "exp_video = BlackMarble()\n", - "exp_video.set_countries(['Cuba'], 2016, res_km=2.5)\n", + "exp_video.set_countries([\"Cuba\"], 2016, res_km=2.5)\n", "exp_video.check()\n", "\n", "# impact function\n", @@ -1967,19 +2007,23 @@ "\n", "# compute sequence of hazards using TropCyclone video_intensity method\n", "exp_sea = add_sea(exp_video, (100, 5))\n", - "centr_video = Centroids.from_lat_lon(exp_sea.gdf['latitude'].values, exp_sea.gdf['longitude'].values)\n", + "centr_video = Centroids.from_lat_lon(\n", + " exp_sea.gdf[\"latitude\"].values, exp_sea.gdf[\"longitude\"].values\n", + ")\n", "centr_video.check()\n", "\n", - "track_name = '2017242N16333'\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id=track_name) # IRMA 2017\n", + "track_name = \"2017242N16333\"\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(provider=\"usa\", storm_id=track_name) # IRMA 2017\n", "\n", "tc_video = TropCyclone()\n", - "tc_list, _ = tc_video.video_intensity(track_name, tr_irma, centr_video) # empty file name to not to write the video\n", + "tc_list, _ = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video\n", + ") # empty file name to not to write the video\n", "\n", "# generate video of impacts\n", - "file_name='./results/irma_imp_fl.gif'\n", + "file_name = \"./results/irma_imp_fl.gif\"\n", "imp_video = Impact()\n", - "imp_list = imp_video.video_direct_impact(exp_video, impfs_video, tc_list, file_name)\n" + "imp_list = imp_video.video_direct_impact(exp_video, impfs_video, tc_list, file_name)" ] } ], diff --git a/doc/tutorial/climada_engine_impact_data.ipynb b/doc/tutorial/climada_engine_impact_data.ipynb index 443a6f4141..40ead3d807 100644 --- a/doc/tutorial/climada_engine_impact_data.ipynb +++ b/doc/tutorial/climada_engine_impact_data.ipynb @@ -46,11 +46,15 @@ "from matplotlib import pyplot as plt\n", "\n", "from climada.util.constants import DEMO_DIR\n", - "from climada.engine.impact_data import emdat_countries_by_hazard, \\\n", - " emdat_impact_yearlysum, emdat_to_impact, clean_emdat_df\n", + "from climada.engine.impact_data import (\n", + " emdat_countries_by_hazard,\n", + " emdat_impact_yearlysum,\n", + " emdat_to_impact,\n", + " clean_emdat_df,\n", + ")\n", "\n", "# set path to CSV file downloaded from https://public.emdat.be :\n", - "emdat_file_path = DEMO_DIR.joinpath('demo_emdat_impact_data_2020.csv')" + "emdat_file_path = DEMO_DIR.joinpath(\"demo_emdat_impact_data_2020.csv\")" ] }, { @@ -129,8 +133,12 @@ "source": [ "\"\"\"Create DataFrame df with EM-DAT entries of tropical cyclones in Thailand and Viet Nam in the years 2005 and 2006\"\"\"\n", "\n", - "df = clean_emdat_df(emdat_file_path, countries=['THA', 'Viet Nam'], hazard=['TC'], \\\n", - " year_range=[2005, 2006])\n", + "df = clean_emdat_df(\n", + " emdat_file_path,\n", + " countries=[\"THA\", \"Viet Nam\"],\n", + " hazard=[\"TC\"],\n", + " year_range=[2005, 2006],\n", + ")\n", "print(df)" ] }, @@ -160,7 +168,9 @@ "source": [ "\"\"\"emdat_countries_by_hazard: get lists of countries impacted by tropical cyclones from 2010 to 2019\"\"\"\n", "\n", - "iso3_codes, country_names = emdat_countries_by_hazard(emdat_file_path, hazard='TC', year_range=(2010, 2019))\n", + "iso3_codes, country_names = emdat_countries_by_hazard(\n", + 
" emdat_file_path, hazard=\"TC\", year_range=(2010, 2019)\n", + ")\n", "\n", "print(country_names)\n", "\n", @@ -214,11 +224,18 @@ "source": [ "\"\"\"Global TC damages 2000 to 2009\"\"\"\n", "\n", - "impact_emdat, countries = emdat_to_impact(emdat_file_path, 'TC', year_range=(2000,2009))\n", - "\n", - "print('Number of TC events in EM-DAT 2000 to 2009 globally: %i' %(impact_emdat.event_id.size))\n", - "print('Global annual average monetary damage (AAI) from TCs as reported in EM-DAT 2000 to 2009: USD billion %2.2f' \\\n", - " %(impact_emdat.aai_agg/1e9))\n" + "impact_emdat, countries = emdat_to_impact(\n", + " emdat_file_path, \"TC\", year_range=(2000, 2009)\n", + ")\n", + "\n", + "print(\n", + " \"Number of TC events in EM-DAT 2000 to 2009 globally: %i\"\n", + " % (impact_emdat.event_id.size)\n", + ")\n", + "print(\n", + " \"Global annual average monetary damage (AAI) from TCs as reported in EM-DAT 2000 to 2009: USD billion %2.2f\"\n", + " % (impact_emdat.aai_agg / 1e9)\n", + ")" ] }, { @@ -267,26 +284,34 @@ "\"\"\"Total people affected by TCs in the Philippines in 2013:\"\"\"\n", "\n", "# People affected\n", - "impact_emdat_PHL, countries = emdat_to_impact(emdat_file_path, 'TC', countries='PHL', \\\n", - " year_range=(2013,2013), imp_str=\"Total Affected\")\n", - "\n", - "print('Number of TC events in EM-DAT in the Philipppines, 2013: %i' \\\n", - " %(impact_emdat_PHL.event_id.size))\n", - "print('\\nPeople affected by TC events in the Philippines in 2013 (per event):')\n", + "impact_emdat_PHL, countries = emdat_to_impact(\n", + " emdat_file_path,\n", + " \"TC\",\n", + " countries=\"PHL\",\n", + " year_range=(2013, 2013),\n", + " imp_str=\"Total Affected\",\n", + ")\n", + "\n", + "print(\n", + " \"Number of TC events in EM-DAT in the Philipppines, 2013: %i\"\n", + " % (impact_emdat_PHL.event_id.size)\n", + ")\n", + "print(\"\\nPeople affected by TC events in the Philippines in 2013 (per event):\")\n", "print(impact_emdat_PHL.at_event)\n", - "print('\\nPeople affected by TC events in the Philippines in 2013 (total):')\n", + "print(\"\\nPeople affected by TC events in the Philippines in 2013 (total):\")\n", "print(int(impact_emdat_PHL.aai_agg))\n", "\n", "# Comparison to monetary damages:\n", - "impact_emdat_PHL_USD, _ = emdat_to_impact(emdat_file_path, 'TC', countries='PHL', \\\n", - " year_range=(2013,2013))\n", + "impact_emdat_PHL_USD, _ = emdat_to_impact(\n", + " emdat_file_path, \"TC\", countries=\"PHL\", year_range=(2013, 2013)\n", + ")\n", "\n", "ax = plt.scatter(impact_emdat_PHL_USD.at_event, impact_emdat_PHL.at_event)\n", - "plt.title('Typhoon impacts in the Philippines, 2013')\n", - "plt.xlabel('Total Damage [USD]')\n", - "plt.ylabel('People Affected');\n", - "#plt.xscale('log')\n", - "#plt.yscale('log')" + "plt.title(\"Typhoon impacts in the Philippines, 2013\")\n", + "plt.xlabel(\"Total Damage [USD]\")\n", + "plt.ylabel(\"People Affected\");\n", + "# plt.xscale('log')\n", + "# plt.yscale('log')" ] }, { @@ -352,23 +377,40 @@ "source": [ "\"\"\"Yearly TC damages in the USA, normalized and current\"\"\"\n", "\n", - "yearly_damage_normalized_to_2019 = emdat_impact_yearlysum(emdat_file_path, countries='USA', \\\n", - " hazard='Tropical cyclone', year_range=None, \\\n", - " reference_year=2019)\n", + "yearly_damage_normalized_to_2019 = emdat_impact_yearlysum(\n", + " emdat_file_path,\n", + " countries=\"USA\",\n", + " hazard=\"Tropical cyclone\",\n", + " year_range=None,\n", + " reference_year=2019,\n", + ")\n", "\n", - "yearly_damage_current = emdat_impact_yearlysum(emdat_file_path, 
countries=['USA'], hazard='TC',)\n", + "yearly_damage_current = emdat_impact_yearlysum(\n", + " emdat_file_path,\n", + " countries=[\"USA\"],\n", + " hazard=\"TC\",\n", + ")\n", "\n", "import matplotlib.pyplot as plt\n", "\n", "fig, axis = plt.subplots(1, 1)\n", - "axis.plot(yearly_damage_current.year, yearly_damage_current.impact, 'b', label='USD current value')\n", - "axis.plot(yearly_damage_normalized_to_2019.year, yearly_damage_normalized_to_2019.impact_scaled, \\\n", - " 'r--', label='USD normalized to 2019')\n", + "axis.plot(\n", + " yearly_damage_current.year,\n", + " yearly_damage_current.impact,\n", + " \"b\",\n", + " label=\"USD current value\",\n", + ")\n", + "axis.plot(\n", + " yearly_damage_normalized_to_2019.year,\n", + " yearly_damage_normalized_to_2019.impact_scaled,\n", + " \"r--\",\n", + " label=\"USD normalized to 2019\",\n", + ")\n", "plt.legend()\n", - "axis.set_title('TC damage reported in EM-DAT in the USA')\n", + "axis.set_title(\"TC damage reported in EM-DAT in the USA\")\n", "axis.set_xticks([2000, 2004, 2008, 2012, 2016])\n", - "axis.set_xlabel('year')\n", - "axis.set_ylabel('Total Damage [USD]');\n" + "axis.set_xlabel(\"year\")\n", + "axis.set_ylabel(\"Total Damage [USD]\");" ] } ], diff --git a/doc/tutorial/climada_engine_unsequa.ipynb b/doc/tutorial/climada_engine_unsequa.ipynb index 08558632ef..a7f6fabd6c 100644 --- a/doc/tutorial/climada_engine_unsequa.ipynb +++ b/doc/tutorial/climada_engine_unsequa.ipynb @@ -154,11 +154,13 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore') #Ignore warnings for making the tutorial's pdf.\n", "\n", - "#Define the base exposure\n", + "warnings.filterwarnings(\"ignore\") # Ignore warnings for making the tutorial's pdf.\n", + "\n", + "# Define the base exposure\n", "from climada.util.constants import EXP_DEMO_H5\n", "from climada.entity import Exposures\n", + "\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)" ] }, @@ -177,7 +179,7 @@ "# Here x_exp is the input uncertainty parameter and exp_func the inputvar.func.\n", "def exp_func(x_exp, exp_base=exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp" ] }, @@ -197,8 +199,9 @@ "from climada.engine.unsequa import InputVar\n", "import scipy as sp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " }\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)" ] }, @@ -249,8 +252,10 @@ ], "source": [ "# Evaluate for a given value of the uncertainty parameters\n", - "exp095 = exp_iv.func(x_exp = 0.95)\n", - "print(f\"Base value is {exp_base.gdf['value'].sum()}, and the value for x_exp=0.95 is {exp095.gdf['value'].sum()}\")" + "exp095 = exp_iv.func(x_exp=0.95)\n", + "print(\n", + " f\"Base value is {exp_base.gdf['value'].sum()}, and the value for x_exp=0.95 is {exp095.gdf['value'].sum()}\"\n", + ")" ] }, { @@ -315,11 +320,12 @@ "m_min, m_max = (1, 2)\n", "n_min, n_max = (1, 2)\n", "\n", + "\n", "# Define the function\n", "# Note that this here works, but might be slow because the method LitPop is called everytime the the function\n", "# is evaluated, and LitPop is relatively slow.\n", "def litpop_cat(m, n):\n", - " exp = Litpop.from_countries('CHE', res_arcsec=150, exponent=[m, n])\n", + " exp = Litpop.from_countries(\"CHE\", res_arcsec=150, exponent=[m, n])\n", " return exp" ] }, @@ -341,9 +347,10 @@ "litpop_dict = {}\n", "for m in range(m_min, m_max + 1):\n", " for n in range(n_min, n_max + 
1):\n", - " exp_mn = LitPop.from_countries('CHE', res_arcsec=150, exponents=[m, n]);\n", + " exp_mn = LitPop.from_countries(\"CHE\", res_arcsec=150, exponents=[m, n])\n", " litpop_dict[(m, n)] = exp_mn\n", "\n", + "\n", "def litpop_cat(m, n, litpop_dict=litpop_dict):\n", " return litpop_dict[(m, n)]" ] @@ -360,16 +367,18 @@ }, "outputs": [], "source": [ - "#Define the distribution dictionnary\n", + "# Define the distribution dictionnary\n", "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", "distr_dict = {\n", - " 'm': sp.stats.randint(low=m_min, high=m_max+1),\n", - " 'n': sp.stats.randint(low=n_min, high=n_max+1)\n", - " }\n", + " \"m\": sp.stats.randint(low=m_min, high=m_max + 1),\n", + " \"n\": sp.stats.randint(low=n_min, high=n_max + 1),\n", + "}\n", "\n", - "cat_iv = InputVar(litpop_cat, distr_dict) # One can use either of the above definitions of litpop_cat" + "cat_iv = InputVar(\n", + " litpop_cat, distr_dict\n", + ") # One can use either of the above definitions of litpop_cat" ] }, { @@ -578,8 +587,9 @@ "# Requires internet connection\n", "from climada.util.constants import TEST_UNC_OUTPUT_IMPACT\n", "from climada.util.api_client import Client\n", + "\n", "apiclient = Client()\n", - "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_IMPACT, status='test_dataset')\n", + "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_IMPACT, status=\"test_dataset\")\n", "_target_dir, [filename] = apiclient.download_dataset(ds)" ] }, @@ -597,6 +607,7 @@ "source": [ "# If you produced your own data, you do not need the API. Just replace 'filename' with the path to your file.\n", "from climada.engine.unsequa import UncOutput\n", + "\n", "unc_imp = UncOutput.from_hdf5(filename)" ] }, @@ -623,7 +634,7 @@ } ], "source": [ - "unc_imp.plot_uncertainty(metric_list=['aai_agg'], figsize=(12,5));" + "unc_imp.plot_uncertainty(metric_list=[\"aai_agg\"], figsize=(12, 5));" ] }, { @@ -642,8 +653,9 @@ "# Requires internet connection\n", "from climada.util.constants import TEST_UNC_OUTPUT_COSTBEN\n", "from climada.util.api_client import Client\n", + "\n", "apiclient = Client()\n", - "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_COSTBEN, status='test_dataset')\n", + "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_COSTBEN, status=\"test_dataset\")\n", "_target_dir, [filename] = apiclient.download_dataset(ds)" ] }, @@ -661,6 +673,7 @@ "source": [ "# If you produced your own data, you do not need the API. 
Just replace 'filename' with the path to your file.\n", "from climada.engine.unsequa import UncOutput\n", + "\n", "unc_cb = UncOutput.from_hdf5(filename)" ] }, @@ -955,25 +968,27 @@ }, "outputs": [], "source": [ - "#Define the input variable functions\n", + "# Define the input variable functions\n", "import numpy as np\n", "\n", "from climada.entity import ImpactFunc, ImpactFuncSet, Exposures\n", "from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", "\n", + "\n", "def impf_func(G=1, v_half=84.7, vmin=25.7, k=3, _id=1):\n", "\n", " def xhi(v, v_half, vmin):\n", " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import numpy as np\n", " from climada.entity import ImpactFunc, ImpactFuncSet\n", - " intensity_unit = 'm/s'\n", + "\n", + " intensity_unit = \"m/s\"\n", " intensity = np.linspace(0, 150, num=100)\n", " mdd = np.repeat(1, len(intensity))\n", " paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in intensity])\n", @@ -982,16 +997,22 @@ " impf_set = ImpactFuncSet([imp_fun])\n", " return impf_set\n", "\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", + "# It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", "# potentially costly operation for each sample.\n", "exp_base.assign_centroids(haz)\n", + "\n", + "\n", "def exp_base_func(x_exp, exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", + "\n", + "\n", "from functools import partial\n", + "\n", "exp_func = partial(exp_base_func, exp_base=exp_base)" ] }, @@ -1018,7 +1039,7 @@ ], "source": [ "# Visualization of the parametrized impact function\n", - "impf_func(G=0.8, v_half=80, vmin=30,k=5).plot();" + "impf_func(G=0.8, v_half=80, vmin=30, k=5).plot();" ] }, { @@ -1032,13 +1053,15 @@ }, "outputs": [], "source": [ - "#Define the InputVars\n", + "# Define the InputVars\n", "\n", "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.beta(10, 1.1)} #This is not really a reasonable distribution but is used\n", - " #here to show that you can use any scipy distribution.\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.beta(10, 1.1)\n", + "} # This is not really a reasonable distribution but is used\n", + "# here to show that you can use any scipy distribution.\n", "\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", @@ -1046,8 +1069,8 @@ " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = InputVar(impf_func, impf_distr)" ] }, @@ -1074,8 +1097,9 @@ ], "source": [ "import matplotlib.pyplot as plt\n", - "ax = exp_iv.plot(figsize=(6,4));\n", - "plt.yticks(fontsize=16);\n", + "\n", + "ax = exp_iv.plot(figsize=(6, 4))\n", + "plt.yticks(fontsize=16)\n", "plt.xticks(fontsize=16);" ] }, @@ -1215,7 +1239,7 @@ } ], 
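The cells above pair a wrapper function with a dict of `scipy` distributions to build an `InputVar`; the dict keys must match the keyword arguments of the wrapped function. A minimal sketch of that pattern with a hypothetical toy `scale_func` (only the `InputVar(func, distr_dict)` constructor and the `.func` access already shown in this tutorial are assumed):

```python
import scipy as sp

from climada.engine.unsequa import InputVar


def scale_func(x_exp, base_value=100.0):
    # hypothetical stand-in for exp_func: scale a base value by the factor
    return base_value * x_exp


distr = {"x_exp": sp.stats.uniform(0.9, 0.2)}  # uniform on [0.9, 1.1]
iv = InputVar(scale_func, distr)

# one draw from the distribution, evaluated through the wrapped function
print(iv.func(x_exp=distr["x_exp"].rvs()))
```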
"source": [ - "output_imp = calc_imp.make_sample(N=2**7, sampling_kwargs={'skip_values': 2**8})\n", + "output_imp = calc_imp.make_sample(N=2**7, sampling_kwargs={\"skip_values\": 2**8})\n", "output_imp.get_samples_df().tail()" ] }, @@ -1248,7 +1272,7 @@ } ], "source": [ - "output_imp.plot_sample(figsize=(15,8));" + "output_imp.plot_sample(figsize=(15, 8));" ] }, { @@ -1269,7 +1293,7 @@ }, "outputs": [], "source": [ - "output_imp = calc_imp.uncertainty(output_imp, rp = [50, 100, 250])" + "output_imp = calc_imp.uncertainty(output_imp, rp=[50, 100, 250])" ] }, { @@ -1306,7 +1330,7 @@ } ], "source": [ - "#All the computed uncertainty metrics attribute\n", + "# All the computed uncertainty metrics attribute\n", "output_imp.uncertainty_metrics" ] }, @@ -1384,8 +1408,8 @@ } ], "source": [ - "#One uncertainty dataframe\n", - "output_imp.get_unc_df('aai_agg').tail()" + "# One uncertainty dataframe\n", + "output_imp.get_unc_df(\"aai_agg\").tail()" ] }, { @@ -1519,7 +1543,7 @@ } ], "source": [ - "output_imp.plot_uncertainty(figsize=(12,12));" + "output_imp.plot_uncertainty(figsize=(12, 12));" ] }, { @@ -1552,7 +1576,7 @@ ], "source": [ "# Specific plot for the return period distributions\n", - "output_imp.plot_rp_uncertainty(figsize=(14.3,8));" + "output_imp.plot_rp_uncertainty(figsize=(14.3, 8));" ] }, { @@ -1704,7 +1728,7 @@ } ], "source": [ - "output_imp.get_sens_df('aai_agg').tail()" + "output_imp.get_sens_df(\"aai_agg\").tail()" ] }, { @@ -1824,7 +1848,7 @@ } ], "source": [ - "output_imp.get_sensitivity('S1')" + "output_imp.get_sensitivity(\"S1\")" ] }, { @@ -1918,7 +1942,7 @@ } ], "source": [ - "output_imp.get_largest_si(salib_si='S1')" + "output_imp.get_largest_si(salib_si=\"S1\")" ] }, { @@ -1953,7 +1977,7 @@ ], "source": [ "# Default for 'sobol' is to plot 'S1' sensitivity index.\n", - "output_imp.plot_sensitivity(figsize=(12,8));" + "output_imp.plot_sensitivity(figsize=(12, 8));" ] }, { @@ -1985,7 +2009,7 @@ } ], "source": [ - "output_imp.plot_sensitivity(salib_si = 'ST', figsize=(12,8));" + "output_imp.plot_sensitivity(salib_si=\"ST\", figsize=(12, 8));" ] }, { @@ -2017,7 +2041,7 @@ } ], "source": [ - "output_imp.plot_sensitivity_second_order(figsize=(12,8));" + "output_imp.plot_sensitivity_second_order(figsize=(12, 8));" ] }, { @@ -2050,7 +2074,7 @@ "from climada.engine.unsequa import CalcImpact\n", "\n", "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method='latin')" + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")" ] }, { @@ -2075,7 +2099,7 @@ } ], "source": [ - "output_imp2.plot_sample(figsize=(15,8));" + "output_imp2.plot_sample(figsize=(15, 8));" ] }, { @@ -2104,13 +2128,15 @@ "import time\n", "\n", "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method='latin')\n", + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")\n", "\n", "start = time.time()\n", - "output_imp2 = calc_imp2.uncertainty(output_imp2, rp = [50, 100, 250], calc_eai_exp=True, calc_at_event=True, processes=4)\n", + "output_imp2 = calc_imp2.uncertainty(\n", + " output_imp2, rp=[50, 100, 250], calc_eai_exp=True, calc_at_event=True, processes=4\n", + ")\n", "end = time.time()\n", - "time_passed = end-start\n", - "print(f'Time passed with pool: {time_passed}')" + "time_passed = end - start\n", + "print(f\"Time passed with pool: {time_passed}\")" ] }, { @@ -2148,13 +2174,15 @@ ], "source": [ "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = 
calc_imp2.make_sample(N=1000, sampling_method='latin')\n", + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")\n", "\n", "start2 = time.time()\n", - "output_imp2 = calc_imp2.uncertainty(output_imp2, rp = [50, 100, 250], calc_eai_exp=True, calc_at_event=True)\n", + "output_imp2 = calc_imp2.uncertainty(\n", + " output_imp2, rp=[50, 100, 250], calc_eai_exp=True, calc_at_event=True\n", + ")\n", "end2 = time.time()\n", - "time_passed_nopool = end2-start2\n", - "print(f'Time passed without pool: {time_passed_nopool}')" + "time_passed_nopool = end2 - start2\n", + "print(f\"Time passed without pool: {time_passed_nopool}\")" ] }, { @@ -2170,10 +2198,11 @@ "source": [ "# Add the original value of the impacts (without uncertainty) to the uncertainty plot\n", "from climada.engine import ImpactCalc\n", + "\n", "imp = ImpactCalc(exp_base, impf_func(), haz).impact(assign_centroids=False)\n", "aai_agg_o = imp.aai_agg\n", "freq_curve_o = imp.calc_freq_curve([50, 100, 250]).impact\n", - "orig_list = [aai_agg_o] + list(freq_curve_o) +[1]" + "orig_list = [aai_agg_o] + list(freq_curve_o) + [1]" ] }, { @@ -2201,7 +2230,12 @@ "source": [ "# plot the aai_agg and freq_curve uncertainty only\n", "# use logarithmic x-scale\n", - "output_imp2.plot_uncertainty(metric_list=['aai_agg', 'freq_curve'], orig_list=orig_list, log=True, figsize=(12,8));" + "output_imp2.plot_uncertainty(\n", + " metric_list=[\"aai_agg\", \"freq_curve\"],\n", + " orig_list=orig_list,\n", + " log=True,\n", + " figsize=(12, 8),\n", + ");" ] }, { @@ -2217,7 +2251,9 @@ "source": [ "# Use the method 'rbd_fast' which is recommend in pair with 'latin'. In addition, change one of the kwargs\n", "# (M=15) of the salib sampling method.\n", - "output_imp2 = calc_imp2.sensitivity(output_imp2, sensitivity_method='rbd_fast', sensitivity_kwargs = {'M': 15})" + "output_imp2 = calc_imp2.sensitivity(\n", + " output_imp2, sensitivity_method=\"rbd_fast\", sensitivity_kwargs={\"M\": 15}\n", + ")" ] }, { @@ -2345,7 +2381,7 @@ } ], "source": [ - "output_imp2.get_largest_si(salib_si='S1', metric_list=['eai_exp']).tail()" + "output_imp2.get_largest_si(salib_si=\"S1\", metric_list=[\"eai_exp\"]).tail()" ] }, { @@ -2401,16 +2437,17 @@ "from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5\n", "from climada.hazard import Centroids, TCTracks, Hazard, TropCyclone\n", "\n", + "\n", "def impf_func(G=1, v_half=84.7, vmin=25.7, k=3, _id=1):\n", "\n", " def xhi(v, v_half, vmin):\n", " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", - " intensity_unit = 'm/s'\n", + " # In-function imports needed only for parallel computing on Windows\n", + " intensity_unit = \"m/s\"\n", " intensity = np.linspace(0, 150, num=100)\n", " mdd = np.repeat(1, len(intensity))\n", " paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in intensity])\n", @@ -2446,7 +2483,7 @@ "# pack future hazard sets into dictionary - we want to sample from this dictionary later\n", "haz_fut_list = [haz_26, haz_45, haz_60, haz_85]\n", "tc_haz_fut_dict = {}\n", - "for r, rcp in enumerate(['26', '45', '60', '85']):\n", + "for r, rcp in enumerate([\"26\", \"45\", \"60\", \"85\"]):\n", " tc_haz_fut_dict[rcp] = haz_fut_list[r]" ] }, @@ -2457,14 +2494,19 @@ "outputs": [], "source": [ "exp_base = 
Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", + "# It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", "# potentially costly operation for each sample.\n", "exp_base.assign_centroids(haz)\n", + "\n", + "\n", "def exp_base_func(x_exp, exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", + "\n", + "\n", "from functools import partial\n", + "\n", "exp_func = partial(exp_base_func, exp_base=exp_base)" ] }, @@ -2477,8 +2519,10 @@ "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.beta(10, 1.1)} #This is not really a reasonable distribution but is used\n", - " #here to show that you can use any scipy distribution.\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.beta(10, 1.1)\n", + "} # This is not really a reasonable distribution but is used\n", + "# here to show that you can use any scipy distribution.\n", "\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", @@ -2486,8 +2530,8 @@ " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = InputVar(impf_func, impf_distr)" ] }, @@ -2504,16 +2548,15 @@ "metadata": {}, "outputs": [], "source": [ - "rcp_key = {0: '26',\n", - " 1: '45',\n", - " 2: '60',\n", - " 3: '85'}\n", + "rcp_key = {0: \"26\", 1: \"45\", 2: \"60\", 3: \"85\"}\n", + "\n", "\n", "# future\n", "def haz_fut_func(rcp_scenario):\n", " haz_fut = tc_haz_fut_dict[rcp_key[rcp_scenario]]\n", " return haz_fut\n", "\n", + "\n", "haz_fut_distr = {\"rcp_scenario\": sp.stats.randint(0, 4)}\n", "\n", "haz_fut_iv = InputVar(haz_fut_func, haz_fut_distr)" @@ -2573,8 +2616,8 @@ ], "source": [ "from climada.engine.unsequa import CalcDeltaImpact\n", - "calc_imp = CalcDeltaImpact(exp_iv, impf_iv, haz,\n", - " exp_iv, impf_iv, haz_fut_iv)" + "\n", + "calc_imp = CalcDeltaImpact(exp_iv, impf_iv, haz, exp_iv, impf_iv, haz_fut_iv)" ] }, { @@ -2639,6 +2682,7 @@ ], "source": [ "from climada.engine.unsequa import UncOutput\n", + "\n", "output_imp.plot_uncertainty(calc_delta=True)" ] }, @@ -2687,6 +2731,7 @@ ], "source": [ "from climada.engine.unsequa import UncOutput\n", + "\n", "output_imp.plot_rp_uncertainty(calc_delta=True)" ] }, @@ -2790,39 +2835,50 @@ "from climada.entity import Entity\n", "from climada.hazard import Hazard\n", "\n", + "\n", "# Entity today has an uncertainty in the total asset value\n", "def ent_today_func(x_ent):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import Entity\n", " from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", " entity = Entity.from_excel(ENT_DEMO_TODAY)\n", " entity.exposures.ref_year = 2018\n", - " entity.exposures.gdf['value'] *= x_ent\n", + " entity.exposures.gdf[\"value\"] *= x_ent\n", " return entity\n", "\n", + "\n", "# Entity in the future has a +- 10% uncertainty in the cost of all the adapatation measures\n", "def ent_fut_func(m_fut_cost):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import Entity\n", " from climada.util.constants 
import ENT_DEMO_FUTURE\n", + "\n", " entity = Entity.from_excel(ENT_DEMO_FUTURE)\n", " entity.exposures.ref_year = 2040\n", - " for meas in entity.measures.get_measure('TC'):\n", + " for meas in entity.measures.get_measure(\"TC\"):\n", " meas.cost *= m_fut_cost\n", " return entity\n", "\n", + "\n", "haz_base = Hazard.from_hdf5(HAZ_DEMO_H5)\n", + "\n", + "\n", "# The hazard intensity in the future is also uncertainty by a multiplicative factor\n", "def haz_fut(x_haz_fut, haz_base):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import copy\n", " from climada.hazard import Hazard\n", " from climada.util.constants import HAZ_DEMO_H5\n", + "\n", " haz = copy.deepcopy(haz_base)\n", " haz.intensity = haz.intensity.multiply(x_haz_fut)\n", " return haz\n", + "\n", + "\n", "from functools import partial\n", - "haz_fut_func = partial(haz_fut, haz_base=haz_base)\n" + "\n", + "haz_fut_func = partial(haz_fut, haz_base=haz_base)" ] }, { @@ -2853,10 +2909,12 @@ } ], "source": [ - "costs_1 = [meas.cost for meas in ent_fut_func(1).measures.get_measure('TC')]\n", - "costs_05 = [meas.cost for meas in ent_fut_func(0.5).measures.get_measure('TC')]\n", - "print(f\"\\nThe cost for m_fut_cost=1 are {costs_1}\\n\"\n", - " f\"The cost for m_fut_cost=0.5 are {costs_05}\");" + "costs_1 = [meas.cost for meas in ent_fut_func(1).measures.get_measure(\"TC\")]\n", + "costs_05 = [meas.cost for meas in ent_fut_func(0.5).measures.get_measure(\"TC\")]\n", + "print(\n", + " f\"\\nThe cost for m_fut_cost=1 are {costs_1}\\n\"\n", + " f\"The cost for m_fut_cost=0.5 are {costs_05}\"\n", + ");" ] }, { @@ -2882,14 +2940,15 @@ "\n", "haz_today = haz_base\n", "\n", - "haz_fut_distr = {\"x_haz_fut\": sp.stats.uniform(1, 3),\n", - " }\n", + "haz_fut_distr = {\n", + " \"x_haz_fut\": sp.stats.uniform(1, 3),\n", + "}\n", "haz_fut_iv = InputVar(haz_fut_func, haz_fut_distr)\n", "\n", - "ent_today_distr = {\"x_ent\": sp.stats.uniform(0.7, 1)}\n", + "ent_today_distr = {\"x_ent\": sp.stats.uniform(0.7, 1)}\n", "ent_today_iv = InputVar(ent_today_func, ent_today_distr)\n", "\n", - "ent_fut_distr = {\"m_fut_cost\": sp.stats.norm(1, 0.1)}\n", + "ent_fut_distr = {\"m_fut_cost\": sp.stats.norm(1, 0.1)}\n", "ent_fut_iv = InputVar(ent_fut_func, ent_fut_distr)" ] }, @@ -3042,8 +3101,12 @@ "source": [ "from climada.engine.unsequa import CalcCostBenefit\n", "\n", - "unc_cb = CalcCostBenefit(haz_input_var=haz_today, ent_input_var=ent_today_iv,\n", - " haz_fut_input_var=haz_fut_iv, ent_fut_input_var=ent_fut_iv)" + "unc_cb = CalcCostBenefit(\n", + " haz_input_var=haz_today,\n", + " ent_input_var=ent_today_iv,\n", + " haz_fut_input_var=haz_fut_iv,\n", + " ent_fut_input_var=ent_fut_iv,\n", + ")" ] }, { @@ -3132,7 +3195,7 @@ } ], "source": [ - "output_cb= unc_cb.make_sample(N=10, sampling_kwargs={'calc_second_order':False})\n", + "output_cb = unc_cb.make_sample(N=10, sampling_kwargs={\"calc_second_order\": False})\n", "output_cb.get_samples_df().tail()" ] }, @@ -4626,12 +4689,11 @@ } ], "source": [ - "\n", - "#without pool\n", + "# without pool\n", "output_cb = unc_cb.uncertainty(output_cb)\n", "\n", - "#with pool\n", - "output_cb = unc_cb.uncertainty(output_cb, processes=4)\n" + "# with pool\n", + "output_cb = unc_cb.uncertainty(output_cb, processes=4)" ] }, { @@ -4667,7 +4729,7 @@ } ], "source": [ - "#Top level metrics keys\n", + "# Top level metrics keys\n", "macro_metrics = output_cb.uncertainty_metrics\n", "macro_metrics" ] @@ -4803,7 +4865,7 @@ ], 
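A note on the sample sizes used above: the `skip_values` and `calc_second_order` kwargs suggest that `make_sample` delegates to SALib's Saltelli scheme, where the number of model evaluations grows linearly in the number of sampled parameters. Assuming that scheme (a sketch of the arithmetic, not unsequa's internals; `saltelli_n_runs` is a hypothetical helper), the counts can be checked by hand:

```python
def saltelli_n_runs(n_base, n_params, calc_second_order=True):
    """Model evaluations produced by SALib's Saltelli sampler:
    N * (2D + 2) with second-order indices, N * (D + 2) without."""
    factor = 2 * n_params + 2 if calc_second_order else n_params + 2
    return n_base * factor


# The cost-benefit setup above samples three parameters
# (x_ent, x_haz_fut, m_fut_cost; the present-day hazard is fixed)
# and calls make_sample with N=10 and calc_second_order=False:
print(saltelli_n_runs(10, 3, calc_second_order=False))  # -> 50 evaluations
```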
"source": [ "# The benefits and cost_ben_ratio are available for each measure\n", - "output_cb.get_uncertainty(metric_list=['benefit', 'cost_ben_ratio']).tail()" + "output_cb.get_uncertainty(metric_list=[\"benefit\", \"cost_ben_ratio\"]).tail()" ] }, { @@ -5073,7 +5135,7 @@ "source": [ "# The impact_meas_present and impact_meas_future provide values of the cost_meas, risk_transf, risk,\n", "# and cost_ins for each measure\n", - "output_cb.get_uncertainty(metric_list=['imp_meas_present']).tail()" + "output_cb.get_uncertainty(metric_list=[\"imp_meas_present\"]).tail()" ] }, { @@ -5106,7 +5168,7 @@ ], "source": [ "# tot_climate_risk and benefit\n", - "output_cb.plot_uncertainty(metric_list=['benefit'], figsize=(12,8));" + "output_cb.plot_uncertainty(metric_list=[\"benefit\"], figsize=(12, 8));" ] }, { @@ -5127,7 +5189,9 @@ }, "outputs": [], "source": [ - "output_cb = unc_cb.sensitivity(output_cb, sensitivity_kwargs={'calc_second_order':False})" + "output_cb = unc_cb.sensitivity(\n", + " output_cb, sensitivity_kwargs={\"calc_second_order\": False}\n", + ")" ] }, { @@ -5161,8 +5225,10 @@ } ], "source": [ - "#plot only certain metrics\n", - "axes = output_cb.plot_sensitivity(metric_list=['cost_ben_ratio','tot_climate_risk','benefit'], figsize=(12,8));" + "# plot only certain metrics\n", + "axes = output_cb.plot_sensitivity(\n", + " metric_list=[\"cost_ben_ratio\", \"tot_climate_risk\", \"benefit\"], figsize=(12, 8)\n", + ");" ] }, { @@ -5216,6 +5282,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -5231,9 +5298,9 @@ "\n", "def get_ws(iso):\n", " properties = {\n", - " 'country_iso3alpha': iso,\n", + " \"country_iso3alpha\": iso,\n", " }\n", - " return client.get_hazard('storm_europe', properties=properties)\n" + " return client.get_hazard(\"storm_europe\", properties=properties)" ] }, { @@ -5242,12 +5309,12 @@ "metadata": {}, "outputs": [], "source": [ - "#Define list of exposures and/or of hazard files\n", + "# Define list of exposures and/or of hazard files\n", "\n", - "exp_list = [get_litpop(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", - "haz_list = [get_ws(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", + "exp_list = [get_litpop(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", + "haz_list = [get_ws(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", "for exp, haz in zip(exp_list, haz_list):\n", - " exp.gdf['impf_WS'] = 1\n", + " exp.gdf[\"impf_WS\"] = 1\n", " exp.assign_centroids(haz)" ] }, @@ -5257,7 +5324,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Define the input variable\n", + "# Define the input variable\n", "from climada.entity import ImpactFuncSet, Exposures\n", "from climada.entity.impact_funcs.storm_europe import ImpfStormEurope\n", "from climada.hazard import Hazard\n", @@ -5265,31 +5332,40 @@ "import scipy as sp\n", "import copy\n", "\n", + "\n", "def exp_func(cnt, x_exp, exp_list=exp_list):\n", " exp = exp_list[int(cnt)].copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " \"cnt\": sp.stats.randint(low=0, high=len(exp_list)) #use the same parameter name accross input variables\n", - " }\n", + "\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + " \"cnt\": sp.stats.randint(\n", + " low=0, high=len(exp_list)\n", + " ), # use the same parameter name accross input variables\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", "\n", "def haz_func(cnt, i_haz, 
haz_list=haz_list):\n", - " haz = copy.deepcopy(haz_list[int(cnt)]) #use the same parameter name accross input variables\n", + " haz = copy.deepcopy(\n", + " haz_list[int(cnt)]\n", + " ) # use the same parameter name across input variables\n", " haz.intensity *= i_haz\n", " return haz\n", "\n", - "haz_distr = {\"i_haz\": sp.stats.norm(1, 0.2),\n", - " \"cnt\": sp.stats.randint(low=0, high=len(haz_list))\n", - " }\n", + "\n", + "haz_distr = {\n", + " \"i_haz\": sp.stats.norm(1, 0.2),\n", + " \"cnt\": sp.stats.randint(low=0, high=len(haz_list)),\n", + "}\n", "haz_iv = InputVar(haz_func, haz_distr)\n", "\n", "impf = ImpfStormEurope.from_schwierz()\n", "impf_set = ImpactFuncSet()\n", "impf_set.append(impf)\n", - "impf_iv = InputVar.impfset([impf_set], bounds_mdd = [0.9, 1.1])" + "impf_iv = InputVar.impfset([impf_set], bounds_mdd=[0.9, 1.1])" ] }, { @@ -5321,7 +5397,7 @@ "metadata": {}, "outputs": [], "source": [ - "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={'skip_values': 2**3})\n" + "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={\"skip_values\": 2**3})" ] }, { @@ -5457,6 +5533,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -5468,21 +5545,26 @@ "source": [ "def get_litpop_path(iso):\n", " properties = {\n", - " 'country_iso3alpha': iso,\n", - " 'res_arcsec': '150',\n", - " 'exponents': '(1,1)',\n", - " 'fin_mode': 'pc'\n", + " \"country_iso3alpha\": iso,\n", + " \"res_arcsec\": \"150\",\n", + " \"exponents\": \"(1,1)\",\n", + " \"fin_mode\": \"pc\",\n", " }\n", - " litpop_datasets = client.list_dataset_infos(data_type='litpop', properties=properties)\n", + " litpop_datasets = client.list_dataset_infos(\n", + " data_type=\"litpop\", properties=properties\n", + " )\n", " ds = litpop_datasets[0]\n", " download_dir, ds_files = client.download_dataset(ds)\n", " return ds_files[0]\n", "\n", + "\n", "def get_ws_path(iso):\n", " properties = {\n", - " 'country_iso3alpha': iso,\n", + " \"country_iso3alpha\": iso,\n", " }\n", - " hazard_datasets = client.list_dataset_infos(data_type='storm_europe', properties=properties)\n", + " hazard_datasets = client.list_dataset_infos(\n", + " data_type=\"storm_europe\", properties=properties\n", + " )\n", " ds = hazard_datasets[0]\n", " download_dir, ds_files = client.download_dataset(ds)\n", " return ds_files[0]" @@ -5494,10 +5576,10 @@ "metadata": {}, "outputs": [], "source": [ - "#Define list of exposures and/or of hazard files\n", + "# Define list of exposures and/or of hazard files\n", "\n", - "f_exp_list = [get_litpop_path(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", - "f_haz_list = [get_ws_path(iso) for iso in ['CHE', 'DEU', 'ITA']]" + "f_exp_list = [get_litpop_path(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", + "f_haz_list = [get_ws_path(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]" ] }, { @@ -5506,40 +5588,43 @@ "metadata": {}, "outputs": [], "source": [ - "#Define the input variable for the loading files\n", - "#The trick is to not reload a file if it is already in memory. 
This is done using a global variable.\n", "from climada.entity import ImpactFunc, ImpactFuncSet, Exposures\n", "from climada.hazard import Hazard\n", "from climada.engine.unsequa import InputVar\n", "import scipy as sp\n", "import copy\n", "\n", + "\n", "def exp_func(f_exp, x_exp, filename_list=f_exp_list):\n", " filename = filename_list[int(f_exp)]\n", " global exp_base\n", - " if 'exp_base' in globals():\n", + " if \"exp_base\" in globals():\n", " if isinstance(exp_base, Exposures):\n", - " if exp_base.gdf['filename'] != str(filename):\n", + " if exp_base.gdf[\"filename\"] != str(filename):\n", " exp_base = Exposures.from_hdf5(filename)\n", - " exp_base.gdf['filename'] = str(filename)\n", + " exp_base.gdf[\"filename\"] = str(filename)\n", " else:\n", " exp_base = Exposures.from_hdf5(filename)\n", - " exp_base.gdf['filename'] = str(filename)\n", + " exp_base.gdf[\"filename\"] = str(filename)\n", "\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " \"f_exp\": sp.stats.randint(low=0, high=len(f_exp_list))\n", - " }\n", + "\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + " \"f_exp\": sp.stats.randint(low=0, high=len(f_exp_list)),\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", "\n", "def haz_func(f_haz, i_haz, filename_list=f_haz_list):\n", " filename = filename_list[int(f_haz)]\n", " global haz_base\n", - " if 'haz_base' in globals():\n", + " if \"haz_base\" in globals():\n", " if isinstance(haz_base, Hazard):\n", " if haz_base.filename != str(filename):\n", " haz_base = Hazard.from_hdf5(filename)\n", @@ -5552,9 +5637,11 @@ " haz.intensity *= i_haz\n", " return haz\n", "\n", - "haz_distr = {\"i_haz\": sp.stats.norm(1, 0.2),\n", - " \"f_haz\": sp.stats.randint(low=0, high=len(f_haz_list))\n", - " }\n", + "\n", + "haz_distr = {\n", + " \"i_haz\": sp.stats.norm(1, 0.2),\n", + " \"f_haz\": sp.stats.randint(low=0, high=len(f_haz_list)),\n", + "}\n", "haz_iv = InputVar(haz_func, haz_distr)\n", "\n", "\n", @@ -5564,29 +5651,33 @@ " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import numpy as np\n", " from climada.entity import ImpactFunc, ImpactFuncSet\n", + "\n", " imp_fun = ImpactFunc()\n", - " imp_fun.haz_type = 'WS'\n", + " imp_fun.haz_type = \"WS\"\n", " imp_fun.id = _id\n", - " imp_fun.intensity_unit = 'm/s'\n", + " imp_fun.intensity_unit = \"m/s\"\n", " imp_fun.intensity = np.linspace(0, 150, num=100)\n", " imp_fun.mdd = np.repeat(1, len(imp_fun.intensity))\n", - " imp_fun.paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in imp_fun.intensity])\n", + " imp_fun.paa = np.array(\n", + " [sigmoid_func(v, G, v_half, vmin, k) for v in imp_fun.intensity]\n", + " )\n", " imp_fun.check()\n", " impf_set = ImpactFuncSet()\n", " impf_set.append(imp_fun)\n", " return impf_set\n", "\n", + "\n", "impf_distr = {\n", " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = 
InputVar(impf_func, impf_distr)" ] }, @@ -5615,8 +5706,8 @@ "outputs": [], "source": [ "# Ordering of the samples by hazard first and exposures second\n", - "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={'skip_values': 2**3})\n", - "output_imp.order_samples(by=['f_haz', 'f_exp'])" + "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={\"skip_values\": 2**3})\n", + "output_imp.order_samples(by=[\"f_haz\", \"f_exp\"])" ] }, { @@ -5633,8 +5724,9 @@ "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", - "e = output_imp.samples_df['f_exp'].values\n", - "h = output_imp.samples_df['f_haz'].values" + "\n", + "e = output_imp.samples_df[\"f_exp\"].values\n", + "h = output_imp.samples_df[\"f_haz\"].values" ] }, { @@ -5650,12 +5742,12 @@ "metadata": {}, "outputs": [], "source": [ - "plt.plot(e, label='exposures');\n", - "plt.plot(h, label='hazards');\n", - "plt.xlabel('samples');\n", - "plt.ylabel('file number');\n", - "plt.title('Order of exposures and hazards files in samples');\n", - "plt.legend(loc='upper right');" + "plt.plot(e, label=\"exposures\")\n", + "plt.plot(h, label=\"hazards\")\n", + "plt.xlabel(\"samples\")\n", + "plt.ylabel(\"file number\")\n", + "plt.title(\"Order of exposures and hazards files in samples\")\n", + "plt.legend(loc=\"upper right\");" ] }, { @@ -5727,4 +5819,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/doc/tutorial/climada_engine_unsequa_helper.ipynb b/doc/tutorial/climada_engine_unsequa_helper.ipynb index 831f5f4bdd..adad223232 100644 --- a/doc/tutorial/climada_engine_unsequa_helper.ipynb +++ b/doc/tutorial/climada_engine_unsequa_helper.ipynb @@ -37,7 +37,8 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore') #Ignore warnings for making the tutorial's pdf." + "\n", + "warnings.filterwarnings(\"ignore\") # Ignore warnings for making the tutorial's pdf." 
] }, { @@ -101,9 +102,10 @@ } ], "source": [ - "#Define the base exposure\n", + "# Define the base exposure\n", "from climada.util.constants import EXP_DEMO_H5\n", "from climada.entity import Exposures\n", + "\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)" ] }, @@ -120,8 +122,9 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_totval = [0.9, 1.1] #+- 10% noise on the total exposures value\n", - "bounds_noise = [0.9, 1.2] #-10% - +20% noise each exposures point\n", + "\n", + "bounds_totval = [0.9, 1.1] # +- 10% noise on the total exposures value\n", + "bounds_noise = [0.9, 1.2] # -10% - +20% noise each exposures point\n", "exp_iv = InputVar.exp([exp_base], bounds_totval, bounds_noise)" ] }, @@ -148,10 +151,10 @@ } ], "source": [ - "#The difference in total value between the base exposure and the average input uncertainty exposure\n", - "#due to the random noise on each exposures point (the average change in the total value is 1.0).\n", + "# The difference in total value between the base exposure and the average input uncertainty exposure\n", + "# due to the random noise on each exposures point (the average change in the total value is 1.0).\n", "avg_exp = exp_iv.evaluate()\n", - "(sum(avg_exp.gdf['value']) - sum(exp_base.gdf['value'])) / sum(exp_base.gdf['value'])" + "(sum(avg_exp.gdf[\"value\"]) - sum(exp_base.gdf[\"value\"])) / sum(exp_base.gdf[\"value\"])" ] }, { @@ -177,8 +180,8 @@ } ], "source": [ - "#The values for EN are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for EN are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "exp_iv.plot();" ] }, @@ -208,19 +211,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -248,22 +255,23 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "tot_pop = 11.317e6\n", "impf_id = 1\n", - "value_unit = 'people'\n", + "value_unit = \"people\"\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 150,\n", - " 'reference_year' : 2020,\n", 
- " 'fin_mode' : 'norm',\n", - " 'total_values' : [tot_pop]\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 150,\n", + " \"reference_year\": 2020,\n", + " \"fin_mode\": \"norm\",\n", + " \"total_values\": [tot_pop],\n", "}\n", - "assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -460,11 +468,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[0, 0.5], [0, 1], [0, 2]] #Choice of exponents m,n\n", + "choice_mn = [[0, 0.5], [0, 1], [0, 2]] # Choice of exponents m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -480,9 +490,9 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_totval = [0.9, 1.1] #+- 10% noise on the total exposures value\n", - "litpop_iv = InputVar.exp(exp_list = litpop_list,\n", - " bounds_totval=bounds_totval)" + "\n", + "bounds_totval = [0.9, 1.1] # +- 10% noise on the total exposures value\n", + "litpop_iv = InputVar.exp(exp_list=litpop_list, bounds_totval=bounds_totval)" ] }, { @@ -848,8 +858,8 @@ } ], "source": [ - "#The values for EN are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for EN are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "litpop_iv.plot();" ] }, @@ -912,9 +922,10 @@ } ], "source": [ - "#Define the base exposure\n", + "# Define the base exposure\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz_base = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -931,10 +942,13 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_freq = [0.9, 1.1] #+- 10% noise on the frequency of all events\n", - "bounds_int = None #No uncertainty on the intensity\n", + "\n", + "bounds_freq = [0.9, 1.1] # +- 10% noise on the frequency of all events\n", + "bounds_int = None # No uncertainty on the intensity\n", "n_ev = None\n", - "haz_iv = InputVar.haz([haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int)" + "haz_iv = InputVar.haz(\n", + " [haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int\n", + ")" ] }, { @@ -960,8 +974,8 @@ } ], "source": [ - "#The difference in frequency for HF=1.1 is indeed 10%.\n", - "haz_high_freq = haz_iv.evaluate(HE=n_ev, HI=None, HF = 1.1)\n", + "# The difference in frequency for HF=1.1 is indeed 10%.\n", + "haz_high_freq = haz_iv.evaluate(HE=n_ev, HI=None, HF=1.1)\n", "(sum(haz_high_freq.frequency) - sum(haz_base.frequency)) / sum(haz_base.frequency)" ] }, @@ -977,12 +991,18 @@ }, "outputs": [], "source": [ - "bounds_freq = [0.9, 1.1] #+- 10% noise on the frequency of all events\n", - "bounds_int = None #No uncertainty on the intensity\n", - "bounds_frac = [0.7, 1.1] #noise on the fraction of all events\n", - "n_ev = round(0.8 * haz_base.size) #sub-sample with re-draw events to obtain hazards with n=0.8*tot_number_events\n", + "bounds_freq = [0.9, 1.1] # +- 10% noise on the frequency of all events\n", + "bounds_int = None # 
No uncertainty on the intensity\n", + "bounds_frac = [0.7, 1.1] # noise on the fraction of all events\n", + "n_ev = round(\n", + " 0.8 * haz_base.size\n", + ") # sub-sample with re-draw events to obtain hazards with n=0.8*tot_number_events\n", "haz_iv = InputVar.haz(\n", - " [haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int, bounds_frac=bounds_frac\n", + " [haz_base],\n", + " n_ev=n_ev,\n", + " bounds_freq=bounds_freq,\n", + " bounds_int=bounds_int,\n", + " bounds_frac=bounds_frac,\n", ")" ] }, @@ -1007,9 +1027,12 @@ "outputs": [], "source": [ "import numpy as np\n", - "HE = 2618981871 #The random seed (number between 0 and 2**32)\n", - "rng = np.random.RandomState(int(HE)) #Initialize a random state with the seed\n", - "chosen_ev = list(rng.choice(haz_base.event_name, int(n_ev))) #Obtain the corresponding events" + "\n", + "HE = 2618981871 # The random seed (number between 0 and 2**32)\n", + "rng = np.random.RandomState(int(HE)) # Initialize a random state with the seed\n", + "chosen_ev = list(\n", + " rng.choice(haz_base.event_name, int(n_ev))\n", + ") # Obtain the corresponding events" ] }, { @@ -1035,7 +1058,7 @@ } ], "source": [ - "#The first event is\n", + "# The first event is\n", "chosen_ev[0]" ] }, @@ -1062,8 +1085,8 @@ } ], "source": [ - "#The values for HE are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for HE are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "haz_iv.plot();" ] }, @@ -1098,9 +1121,9 @@ } ], "source": [ - "#The number of events per sample is equal to n_ev\n", - "haz_sub = haz_iv.evaluate(HE=928165924, HI=None, HF = 1.1, HA=None)\n", - "#The number for HE is irrelevant, as all samples have the same n_Ev\n", + "# The number of events per sample is equal to n_ev\n", + "haz_sub = haz_iv.evaluate(HE=928165924, HI=None, HF=1.1, HA=None)\n", + "# The number for HE is irrelevant, as all samples have the same n_Ev\n", "haz_sub.size - n_ev" ] }, @@ -1149,6 +1172,7 @@ "outputs": [], "source": [ "from climada.entity import ImpactFuncSet, ImpfTropCyclone\n", + "\n", "impf = ImpfTropCyclone.from_emanuel_usa()\n", "impf_set_base = ImpactFuncSet([impf])" ] @@ -1174,14 +1198,17 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_impfi = [-10, 10] #-10 m/s ; +10m/s uncertainty on the intensity\n", - "bounds_mdd = [0.7, 1.1] #-30% - +10% uncertainty on the mdd\n", - "bounds_paa = None #No uncertainty in the paa\n", - "impf_iv = InputVar.impfset(impf_set_list=[impf_set_base],\n", - " bounds_impfi=bounds_impfi,\n", - " bounds_mdd=bounds_mdd,\n", - " bounds_paa=bounds_paa,\n", - " haz_id_dict={'TC': [1]})" + "\n", + "bounds_impfi = [-10, 10] # -10 m/s ; +10m/s uncertainty on the intensity\n", + "bounds_mdd = [0.7, 1.1] # -30% - +10% uncertainty on the mdd\n", + "bounds_paa = None # No uncertainty in the paa\n", + "impf_iv = InputVar.impfset(\n", + " impf_set_list=[impf_set_base],\n", + " bounds_impfi=bounds_impfi,\n", + " bounds_mdd=bounds_mdd,\n", + " bounds_paa=bounds_paa,\n", + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1207,11 +1234,11 @@ } ], "source": [ - "#Plot the impact function for 50 random samples (note for the expert, these are not global)\n", + "# Plot the impact function for 50 random samples (note for the expert, these are not global)\n", "n = 50\n", "ax = impf_iv.evaluate().plot()\n", - "inten = 
impf_iv.distr_dict['IFi'].rvs(size=n)\n", - "mdd = impf_iv.distr_dict['MDD'].rvs(size=n)\n", + "inten = impf_iv.distr_dict[\"IFi\"].rvs(size=n)\n", + "mdd = impf_iv.distr_dict[\"MDD\"].rvs(size=n)\n", "for i, m in zip(inten, mdd):\n", " impf_iv.evaluate(IFi=i, MDD=m).plot(axis=ax)\n", "ax.get_legend().remove()" @@ -1286,6 +1313,7 @@ "source": [ "from climada.entity import Entity\n", "from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", "ent = Entity.from_excel(ENT_DEMO_TODAY)\n", "ent.exposures.ref_year = 2018\n", "ent.check()" @@ -1304,11 +1332,12 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "ent_iv = InputVar.ent(\n", - " impf_set_list = [ent.impact_funcs],\n", - " disc_rate = ent.disc_rates,\n", - " exp_list = [ent.exposures],\n", - " meas_set = ent.measures,\n", + " impf_set_list=[ent.impact_funcs],\n", + " disc_rate=ent.disc_rates,\n", + " exp_list=[ent.exposures],\n", + " meas_set=ent.measures,\n", " bounds_disc=[0, 0.08],\n", " bounds_cost=[0.5, 1.5],\n", " bounds_totval=[0.9, 1.1],\n", @@ -1316,8 +1345,8 @@ " bounds_mdd=[0.9, 1.05],\n", " bounds_paa=None,\n", " bounds_impfi=[-2, 5],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1367,19 +1396,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -1407,19 +1440,20 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "impf_id = 1\n", "value_unit = None\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 300,\n", - " 'reference_year' : 2020,\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 300,\n", + " \"reference_year\": 2020,\n", "}\n", - "assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -1661,11 +1695,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] #Choice of exponents m,n\n", + "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] # Choice of exponents 
m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -1693,6 +1729,7 @@ "source": [ "from climada.entity import Entity\n", "from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", "ent = Entity.from_excel(ENT_DEMO_TODAY)\n", "ent.exposures.ref_year = 2020\n", "ent.check()" @@ -1711,11 +1748,12 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "ent_iv = InputVar.ent(\n", - " impf_set_list = [ent.impact_funcs],\n", - " disc_rate = ent.disc_rates,\n", - " exp_list = litpop_list,\n", - " meas_set = ent.measures,\n", + " impf_set_list=[ent.impact_funcs],\n", + " disc_rate=ent.disc_rates,\n", + " exp_list=litpop_list,\n", + " meas_set=ent.measures,\n", " bounds_disc=[0, 0.08],\n", " bounds_cost=[0.5, 1.5],\n", " bounds_totval=[0.9, 1.1],\n", @@ -1723,8 +1761,8 @@ " bounds_mdd=[0.9, 1.05],\n", " bounds_paa=None,\n", " bounds_impfi=[-2, 5],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1847,16 +1885,16 @@ "outputs": [], "source": [ "entfut_iv = InputVar.entfut(\n", - " impf_set_list = [ent_fut.impact_funcs],\n", - " exp_list = [ent_fut.exposures],\n", - " meas_set = ent_fut.measures,\n", + " impf_set_list=[ent_fut.impact_funcs],\n", + " exp_list=[ent_fut.exposures],\n", + " meas_set=ent_fut.measures,\n", " bounds_cost=[0.6, 1.2],\n", " bounds_eg=[0.8, 1.5],\n", " bounds_noise=None,\n", " bounds_mdd=[0.7, 0.9],\n", " bounds_paa=[1.3, 2],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1879,19 +1917,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -1919,19 +1961,20 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "impf_id = 1\n", "value_unit = None\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 300,\n", - " 'reference_year' : 2040,\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 300,\n", + " \"reference_year\": 2040,\n", "}\n", - 
"assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -2306,11 +2349,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] #Choice of exponents m,n\n", + "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] # Choice of exponents m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -2358,17 +2403,18 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "entfut_iv = InputVar.entfut(\n", - " impf_set_list = [ent_fut.impact_funcs],\n", - " exp_list = litpop_list,\n", - " meas_set = ent_fut.measures,\n", + " impf_set_list=[ent_fut.impact_funcs],\n", + " exp_list=litpop_list,\n", + " meas_set=ent_fut.measures,\n", " bounds_cost=[0.6, 1.2],\n", " bounds_eg=[0.8, 1.5],\n", " bounds_noise=None,\n", " bounds_mdd=[0.7, 0.9],\n", " bounds_paa=[1.3, 2],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] } ], diff --git a/doc/tutorial/climada_entity_DiscRates.ipynb b/doc/tutorial/climada_entity_DiscRates.ipynb index acb33de016..375e2167fc 100644 --- a/doc/tutorial/climada_entity_DiscRates.ipynb +++ b/doc/tutorial/climada_entity_DiscRates.ipynb @@ -74,11 +74,11 @@ "# Compute net present value between present year and future year.\n", "ini_year = 2019\n", "end_year = 2050\n", - "val_years = np.zeros(end_year-ini_year+1)\n", - "val_years[0] = 100000000 # initial investment\n", - "val_years[10:] = 75000 # maintenance from 10th year\n", + "val_years = np.zeros(end_year - ini_year + 1)\n", + "val_years[0] = 100000000 # initial investment\n", + "val_years[10:] = 75000 # maintenance from 10th year\n", "npv = disc.net_present_value(ini_year, end_year, val_years)\n", - "print('net present value: {:.5e}'.format(npv))" + "print(\"net present value: {:.5e}\".format(npv))" ] }, { @@ -135,8 +135,8 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", - "print('Read file:', ENT_TEMPLATE_XLS)\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "print(\"Read file:\", ENT_TEMPLATE_XLS)\n", "disc = DiscRates.from_excel(file_name)\n", "disc.plot();" ] @@ -170,11 +170,11 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "disc = DiscRates.from_excel(file_name)\n", "\n", "# write file\n", - "disc.write_excel('results/tutorial_disc.xlsx')" + "disc.write_excel(\"results/tutorial_disc.xlsx\")" ] }, { @@ -192,8 +192,9 @@ "outputs": [], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_disc.p', disc)" + "save(\"tutorial_disc.p\", disc)" ] } ], diff --git a/doc/tutorial/climada_entity_Exposures.ipynb b/doc/tutorial/climada_entity_Exposures.ipynb index b5db1520ec..d46903e8f2 100644 --- 
a/doc/tutorial/climada_entity_Exposures.ipynb +++ b/doc/tutorial/climada_entity_Exposures.ipynb @@ -112,13 +112,15 @@ "# Fill a pandas DataFrame with the 3 mandatory variables (latitude, longitude, value) for a number of assets (10'000).\n", "# We will do this with random dummy data for purely illustrative reasons:\n", "exp_df = DataFrame()\n", - "n_exp = 100*100\n", + "n_exp = 100 * 100\n", "# provide value\n", - "exp_df['value'] = np.arange(n_exp)\n", + "exp_df[\"value\"] = np.arange(n_exp)\n", "# provide latitude and longitude\n", - "lat, lon = np.mgrid[15 : 35 : complex(0, np.sqrt(n_exp)), 20 : 40 : complex(0, np.sqrt(n_exp))]\n", - "exp_df['latitude'] = lat.flatten()\n", - "exp_df['longitude'] = lon.flatten()" + "lat, lon = np.mgrid[\n", + " 15 : 35 : complex(0, np.sqrt(n_exp)), 20 : 40 : complex(0, np.sqrt(n_exp))\n", + "]\n", + "exp_df[\"latitude\"] = lat.flatten()\n", + "exp_df[\"longitude\"] = lon.flatten()" ] }, { @@ -131,7 +133,7 @@ "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", "# the same impact function: the one that has ID '1':\n", "# Of course, this will only be relevant at later steps during impact calculations.\n", - "exp_df['impf_TC'] = np.ones(n_exp, int)" + "exp_df[\"impf_TC\"] = np.ones(n_exp, int)" ] }, { @@ -156,8 +158,8 @@ ], "source": [ "# Let's have a look at the pandas DataFrame\n", - "print('exp_df is a DataFrame:', str(type(exp_df)))\n", - "print('exp_df looks like:')\n", + "print(\"exp_df is a DataFrame:\", str(type(exp_df)))\n", + "print(\"exp_df looks like:\")\n", "print(exp_df.head())" ] }, @@ -195,12 +197,12 @@ "# Generate Exposures from the pandas DataFrame. This step converts the DataFrame into\n", "# a CLIMADA Exposures instance!\n", "exp = Exposures(exp_df)\n", - "print('exp has the type:', str(type(exp)))\n", - "print('and contains a GeoDataFrame exp.gdf:', str(type(exp.gdf)))\n", + "print(\"exp has the type:\", str(type(exp)))\n", + "print(\"and contains a GeoDataFrame exp.gdf:\", str(type(exp.gdf)))\n", "\n", "# set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude\n", "exp.set_geometry_points()\n", - "print('\\n' + 'check method logs:')\n", + "print(\"\\n\" + \"check method logs:\")\n", "\n", "# always apply the check() method in the end. 
It puts metadata that has not been assigned,\n", "# and points out missing mandatory data\n", @@ -243,7 +245,7 @@ ], "source": [ "# let's have a look at the Exposures instance we created!\n", - "print('\\n' + 'exp looks like:')\n", + "print(\"\\n\" + \"exp looks like:\")\n", "print(exp)" ] }, @@ -292,9 +294,9 @@ "from climada.entity import Exposures\n", "\n", "# Read spatial info from an external file into GeoDataFrame\n", - "world = gpd.read_file(gpd.datasets.get_path('naturalearth_cities'))\n", - "print('World is a GeoDataFrame:', str(type(world)))\n", - "print('World looks like:')\n", + "world = gpd.read_file(gpd.datasets.get_path(\"naturalearth_cities\"))\n", + "print(\"World is a GeoDataFrame:\", str(type(world)))\n", + "print(\"World looks like:\")\n", "print(world.head())" ] }, @@ -317,9 +319,9 @@ "# Generate Exposures: value, latitude and longitude for each exposure entry.\n", "# Convert GeoDataFrame into Exposure instance\n", "exp_gpd = Exposures(world)\n", - "print('\\n' + 'exp_gpd is an Exposures:', str(type(exp_gpd)))\n", + "print(\"\\n\" + \"exp_gpd is an Exposures:\", str(type(exp_gpd)))\n", "# add random values to entries\n", - "exp_gpd.gdf['value'] = np.arange(world.shape[0])\n", + "exp_gpd.gdf[\"value\"] = np.arange(world.shape[0])\n", "# set latitude and longitude attributes from geometry\n", "exp_gpd.set_lat_lon()" ] @@ -348,8 +350,8 @@ "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", "# the same impact function: the one that has ID '1':\n", "# Of course, this will only be relevant at later steps during impact calculations.\n", - "exp_gpd.gdf['impf_TC'] = np.ones(world.shape[0], int)\n", - "print('\\n' + 'check method logs:')\n", + "exp_gpd.gdf[\"impf_TC\"] = np.ones(world.shape[0], int)\n", + "print(\"\\n\" + \"check method logs:\")\n", "\n", "# as always, run check method to assign meta-data and check for missing mandatory variables.\n", "exp_gpd.check()" @@ -414,7 +416,7 @@ ], "source": [ "# let's have a look at the Exposures instance we created!\n", - "print('\\n' + '\\x1b[1;03;30;30m' + 'exp_gpd looks like:' + '\\x1b[0m')\n", + "print(\"\\n\" + \"\\x1b[1;03;30;30m\" + \"exp_gpd looks like:\" + \"\\x1b[0m\")\n", "print(exp_gpd)" ] }, @@ -536,7 +538,7 @@ "sel_exp = exp_gpd.copy() # to keep the original exp_gpd Exposures data\n", "sel_exp.gdf = sel_exp.gdf.cx[:, -5:5]\n", "\n", - "print('\\n' + 'sel_exp contains a subset of the original data')\n", + "print(\"\\n\" + \"sel_exp contains a subset of the original data\")\n", "sel_exp.gdf.head()" ] }, @@ -659,13 +661,14 @@ "source": [ "# Example 2: extract data in a polygon\n", "from shapely.geometry import Polygon\n", + "\n", "sel_polygon = exp_gpd.copy() # to keep the original exp_gpd Exposures data\n", "\n", "poly = Polygon([(0, -10), (0, 10), (10, 5)])\n", "sel_polygon.gdf = sel_polygon.gdf[sel_polygon.gdf.intersects(poly)]\n", "\n", "# Let's have a look. 
Again, the sub-selection is a GeoDataFrame!\n", - "print('\\n' + 'sel_exp contains a subset of the original data')\n", + "print(\"\\n\" + \"sel_exp contains a subset of the original data\")\n", "sel_polygon.gdf" ] }, @@ -799,8 +802,10 @@ "# Example 3: change coordinate reference system\n", "# use help to see more options: help(sel_exp.to_crs)\n", "sel_polygon.to_crs(epsg=3395, inplace=True)\n", - "print('\\n' + 'the crs has changed to ' +str(sel_polygon.crs))\n", - "print('the values for latitude and longitude are now according to the new coordinate system: ')\n", + "print(\"\\n\" + \"the crs has changed to \" + str(sel_polygon.crs))\n", + "print(\n", + " \"the values for latitude and longitude are now according to the new coordinate system: \"\n", + ")\n", "sel_polygon.gdf" ] }, @@ -922,8 +927,8 @@ "exp_all = Exposures.concat([sel_polygon, sel_exp.to_crs(epsg=3395)])\n", "\n", "# the output is of type Exposures\n", - "print('exp_all type and number of rows:', type(exp_all), exp_all.gdf.shape[0])\n", - "print('number of unique rows:', exp_all.gdf.drop_duplicates().shape[0])\n", + "print(\"exp_all type and number of rows:\", type(exp_all), exp_all.gdf.shape[0])\n", + "print(\"number of unique rows:\", exp_all.gdf.drop_duplicates().shape[0])\n", "\n", "# NaNs will appear in the missing values\n", "exp_all.gdf.head()" @@ -1103,8 +1108,8 @@ "exp_templ = pd.read_excel(file_name)\n", "\n", "# Let's have a look at the data:\n", - "print('exp_templ is a DataFrame:', str(type(exp_templ)))\n", - "print('exp_templ looks like:')\n", + "print(\"exp_templ is a DataFrame:\", str(type(exp_templ)))\n", + "print(\"exp_templ looks like:\")\n", "exp_templ.head()" ] }, @@ -1145,14 +1150,14 @@ "source": [ "# Generate an Exposures instance from the dataframe.\n", "exp_templ = Exposures(exp_templ)\n", - "print('\\n' + 'exp_templ is now an Exposures:', str(type(exp_templ)))\n", + "print(\"\\n\" + \"exp_templ is now an Exposures:\", str(type(exp_templ)))\n", "\n", "# set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude\n", - "print('\\n' + 'set_geometry logs:')\n", + "print(\"\\n\" + \"set_geometry logs:\")\n", "exp_templ.set_geometry_points()\n", "# as always, run check method to include metadata and check for missing mandatory parameters\n", "\n", - "print('\\n' + 'check exp_templ:')\n", + "print(\"\\n\" + \"check exp_templ:\")\n", "exp_templ.check()" ] }, @@ -1314,7 +1319,7 @@ ], "source": [ "# Let's have a look at our Exposures instance!\n", - "print('\\n' + 'exp_templ.gdf looks like:')\n", + "print(\"\\n\" + \"exp_templ.gdf looks like:\")\n", "exp_templ.gdf.head()" ] }, @@ -1347,7 +1352,7 @@ "\n", "# We take an example with a dummy raster file (HAZ_DEMO_FL), running the method set_from_raster directly loads the\n", "# necessary info from the file into an Exposures instance.\n", - "exp_raster = Exposures.from_raster(HAZ_DEMO_FL, window= Window(10, 20, 50, 60))\n", + "exp_raster = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60))\n", "# There are several keyword argument options that come with the set_from_raster method (such as\n", "# specifying a window, if not the entire file should be read, or a bounding box. Check them out." 
] @@ -1376,7 +1381,7 @@ "source": [ "# As always, run the check method, such that metadata can be assigned and checked for missing mandatory parameters.\n", "exp_raster.check()\n", - "print('Meta:', exp_raster.meta)" + "print(\"Meta:\", exp_raster.meta)" ] }, { @@ -1475,7 +1480,7 @@ ], "source": [ "# Let's have a look at the Exposures instance!\n", - "print('\\n' + 'exp_raster looks like:')\n", + "print(\"\\n\" + \"exp_raster looks like:\")\n", "exp_raster.gdf.head()" ] }, @@ -1567,7 +1572,7 @@ ], "source": [ "# Example 1: plot_hexbin method\n", - "print('Plotting exp_df.')\n", + "print(\"Plotting exp_df.\")\n", "axs = exp.plot_hexbin();\n", "\n", "# further methods to check out:\n", @@ -1606,7 +1611,7 @@ "source": [ "# Example 2: plot_scatter method\n", "\n", - "exp_gpd.to_crs('epsg:3035', inplace=True)\n", + "exp_gpd.to_crs(\"epsg:3035\", inplace=True)\n", "exp_gpd.plot_scatter(pop_name=False);" ] }, @@ -1637,9 +1642,19 @@ ], "source": [ "# Example 3: plot_raster method\n", - "from climada.util.plot import add_cntry_names # use climada's plotting utilities\n", - "ax = exp.plot_raster(); # plot with same resolution as data\n", - "add_cntry_names(ax, [exp.gdf['longitude'].min(), exp.gdf['longitude'].max(), exp.gdf['latitude'].min(), exp.gdf['latitude'].max()])\n", + "from climada.util.plot import add_cntry_names # use climada's plotting utilities\n", + "\n", + "ax = exp.plot_raster()\n", + "# plot with same resolution as data\n", + "add_cntry_names(\n", + " ax,\n", + " [\n", + " exp.gdf[\"longitude\"].min(),\n", + " exp.gdf[\"longitude\"].max(),\n", + " exp.gdf[\"latitude\"].min(),\n", + " exp.gdf[\"latitude\"].max(),\n", + " ],\n", + ")\n", "\n", "# use keyword argument save_tiff='filepath.tiff' to save the corresponding raster in tiff format\n", "# use keyword argument raster_res='desired number' to change resolution of the raster." @@ -1674,11 +1689,16 @@ "source": [ "# Example 4: plot_basemap method\n", "import contextily as ctx\n", + "\n", "# select the background image from the available ctx.providers\n", - "ax = exp_templ.plot_basemap(buffer=30000, cmap='brg'); # using Positron from CartoDB\n", - "ax = exp_templ.plot_basemap(buffer=30000, cmap='brg',\n", - " url=ctx.providers.OpenStreetMap.Mapnik, # Using OpenStreetmap,\n", - " zoom=9); # select the zoom level of the map, affects the font size of labelled objects" + "ax = exp_templ.plot_basemap(buffer=30000, cmap=\"brg\")\n", + "# using Positron from CartoDB\n", + "ax = exp_templ.plot_basemap(\n", + " buffer=30000,\n", + " cmap=\"brg\",\n", + " url=ctx.providers.OpenStreetMap.Mapnik, # Using OpenStreetmap,\n", + " zoom=9,\n", + "); # select the zoom level of the map, affects the font size of labelled objects" ] }, { @@ -1718,7 +1738,7 @@ ], "source": [ "# other visualization types\n", - "exp_templ.gdf.hist(column='value');" + "exp_templ.gdf.hist(column=\"value\");" ] }, { @@ -1737,12 +1757,15 @@ "metadata": {}, "outputs": [], "source": [ - "import fiona; fiona.supported_drivers\n", + "import fiona\n", + "\n", + "fiona.supported_drivers\n", "from climada import CONFIG\n", + "\n", "results = CONFIG.local_data.save_dir.dir()\n", "\n", "# DataFrame save to csv format. 
geometry writen as string, metadata not saved!\n", - "exp_templ.gdf.to_csv(results.joinpath('exp_templ.csv'), sep='\\t')" + "exp_templ.gdf.to_csv(results.joinpath(\"exp_templ.csv\"), sep=\"\\t\")" ] }, { @@ -1752,7 +1775,7 @@ "outputs": [], "source": [ "# write as hdf5 file\n", - "exp_templ.write_hdf5(results.joinpath('exp_temp.h5'))" + "exp_templ.write_hdf5(results.joinpath(\"exp_temp.h5\"))" ] }, { @@ -1771,8 +1794,9 @@ "source": [ "# save in pickle format\n", "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('exp_templ.pkl.p', exp_templ) # creates results folder and stores there" + "save(\"exp_templ.pkl.p\", exp_templ) # creates results folder and stores there" ] }, { @@ -1814,7 +1838,7 @@ "source": [ "# set_geometry_points is expensive for big exposures\n", "# for small amount of data, the execution time might be even greater when using dask\n", - "exp.gdf.drop(columns=['geometry'], inplace=True)\n", + "exp.gdf.drop(columns=[\"geometry\"], inplace=True)\n", "print(exp.gdf.head())\n", "%time exp.set_geometry_points(scheduler='processes')\n", "print(exp.gdf.head())" diff --git a/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb b/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb index 22c5827f2c..904d00f4d4 100644 --- a/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb +++ b/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb @@ -58,11 +58,13 @@ "from climada.entity.impact_funcs.storm_europe import ImpfStormEurope\n", "from climada.entity import Exposures\n", "\n", - "HAZ = Client().get_hazard('storm_europe', name='test_haz_WS_nl', status='test_dataset');\n", + "HAZ = Client().get_hazard(\"storm_europe\", name=\"test_haz_WS_nl\", status=\"test_dataset\")\n", "\n", - "EXP_POLY = Client().get_exposures('base', name='test_polygon_exp', status='test_dataset');\n", - "EXP_LINE = Client().get_exposures('base', name='test_line_exp', status='test_dataset');\n", - "EXP_POINT = Client().get_exposures('base', name='test_point_exp', status='test_dataset');\n", + "EXP_POLY = Client().get_exposures(\n", + " \"base\", name=\"test_polygon_exp\", status=\"test_dataset\"\n", + ")\n", + "EXP_LINE = Client().get_exposures(\"base\", name=\"test_line_exp\", status=\"test_dataset\")\n", + "EXP_POINT = Client().get_exposures(\"base\", name=\"test_point_exp\", status=\"test_dataset\")\n", "\n", "EXP_MIX = Exposures.concat([EXP_POLY, EXP_LINE, EXP_POINT])\n", "\n", @@ -109,15 +111,20 @@ } ], "source": [ - "#disaggregate in the same CRS as the exposures are defined (here degrees), resolution 1degree\n", - "#divide values on points\n", - "#aggregate by summing\n", + "# disaggregate in the same CRS as the exposures are defined (here degrees), resolution 1degree\n", + "# divide values on points\n", + "# aggregate by summing\n", "\n", "impact = u_lp.calc_geom_impact(\n", - " exp=EXP_MIX, impf_set=IMPF_SET, haz=HAZ,\n", - " res=0.2, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", - " )" + " exp=EXP_MIX,\n", + " impf_set=IMPF_SET,\n", + " haz=HAZ,\n", + " res=0.2,\n", + " to_meters=False,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", + ")" ] }, { @@ -170,15 +177,20 @@ } ], "source": [ - "#disaggregate in meters\n", - "#same value for each point, fixed to 1 (allows to get percentages of affected surface/distance)\n", - "#aggregate by summing\n", + "# disaggregate in meters\n", + 
"# same value for each point, fixed to 1 (allows to get percentages of affected surface/distance)\n", + "# aggregate by summing\n", "\n", "impact = u_lp.calc_geom_impact(\n", - " exp=EXP_MIX, impf_set=IMPF_SET, haz=HAZ,\n", - " res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=1.0,\n", - " agg_met=u_lp.AggMethod.SUM\n", - " );" + " exp=EXP_MIX,\n", + " impf_set=IMPF_SET,\n", + " haz=HAZ,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=1.0,\n", + " agg_met=u_lp.AggMethod.SUM,\n", + ");" ] }, { @@ -206,7 +218,10 @@ ], "source": [ "import matplotlib.pyplot as plt\n", - "ax = u_lp.plot_eai_exp_geom(impact, legend_kwds={'label': 'percentage', 'orientation': 'horizontal'})" + "\n", + "ax = u_lp.plot_eai_exp_geom(\n", + " impact, legend_kwds={\"label\": \"percentage\", \"orientation\": \"horizontal\"}\n", + ")" ] }, { @@ -282,36 +297,60 @@ " from climada_petals.entity.exposures.black_marble import country_iso_geom\n", "\n", " # open the file containing the Netherlands admin-1 polygons\n", - " shp_file = shapereader.natural_earth(resolution='10m',\n", - " category='cultural',\n", - " name='admin_0_countries')\n", + " shp_file = shapereader.natural_earth(\n", + " resolution=\"10m\", category=\"cultural\", name=\"admin_0_countries\"\n", + " )\n", " shp_file = shapereader.Reader(shp_file)\n", "\n", " # extract the NL polygons\n", - " prov_names = {'Netherlands': ['Groningen', 'Drenthe',\n", - " 'Overijssel', 'Gelderland',\n", - " 'Limburg', 'Zeeland',\n", - " 'Noord-Brabant', 'Zuid-Holland',\n", - " 'Noord-Holland', 'Friesland',\n", - " 'Flevoland', 'Utrecht']\n", - " }\n", - " polygon_Netherlands, polygons_prov_NL = country_iso_geom(prov_names,\n", - " shp_file)\n", - " prov_geom_NL = {prov: geom for prov, geom in zip(list(prov_names.values())[0], list(polygons_prov_NL.values())[0])}\n", + " prov_names = {\n", + " \"Netherlands\": [\n", + " \"Groningen\",\n", + " \"Drenthe\",\n", + " \"Overijssel\",\n", + " \"Gelderland\",\n", + " \"Limburg\",\n", + " \"Zeeland\",\n", + " \"Noord-Brabant\",\n", + " \"Zuid-Holland\",\n", + " \"Noord-Holland\",\n", + " \"Friesland\",\n", + " \"Flevoland\",\n", + " \"Utrecht\",\n", + " ]\n", + " }\n", + " polygon_Netherlands, polygons_prov_NL = country_iso_geom(prov_names, shp_file)\n", + " prov_geom_NL = {\n", + " prov: geom\n", + " for prov, geom in zip(\n", + " list(prov_names.values())[0], list(polygons_prov_NL.values())[0]\n", + " )\n", + " }\n", "\n", " # assign a value to each admin-1 area (assumption 100'000 USD per inhabitant)\n", - " population_prov_NL = {'Drenthe':493449, 'Flevoland':422202,\n", - " 'Friesland':649988, 'Gelderland':2084478,\n", - " 'Groningen':585881, 'Limburg':1118223,\n", - " 'Noord-Brabant':2562566, 'Noord-Holland':2877909,\n", - " 'Overijssel':1162215, 'Zuid-Holland':3705625,\n", - " 'Utrecht':1353596, 'Zeeland':383689}\n", - " value_prov_NL = {n: 100000 * population_prov_NL[n] for n in population_prov_NL.keys()}\n", + " population_prov_NL = {\n", + " \"Drenthe\": 493449,\n", + " \"Flevoland\": 422202,\n", + " \"Friesland\": 649988,\n", + " \"Gelderland\": 2084478,\n", + " \"Groningen\": 585881,\n", + " \"Limburg\": 1118223,\n", + " \"Noord-Brabant\": 2562566,\n", + " \"Noord-Holland\": 2877909,\n", + " \"Overijssel\": 1162215,\n", + " \"Zuid-Holland\": 3705625,\n", + " \"Utrecht\": 1353596,\n", + " \"Zeeland\": 383689,\n", + " }\n", + " value_prov_NL = {\n", + " n: 100000 * population_prov_NL[n] for n in population_prov_NL.keys()\n", + " }\n", "\n", " # combine 
into GeoDataFrame and add a coordinate reference system to it:\n", - " df1 = pd.DataFrame.from_dict(population_prov_NL, orient='index', columns=['population']).join(\n", - " pd.DataFrame.from_dict(value_prov_NL, orient='index', columns=['value']))\n", - " df1['geometry'] = [prov_geom_NL[prov] for prov in df1.index]\n", + " df1 = pd.DataFrame.from_dict(\n", + " population_prov_NL, orient=\"index\", columns=[\"population\"]\n", + " ).join(pd.DataFrame.from_dict(value_prov_NL, orient=\"index\", columns=[\"value\"]))\n", + " df1[\"geometry\"] = [prov_geom_NL[prov] for prov in df1.index]\n", " gdf_polys = gpd.GeoDataFrame(df1)\n", " gdf_polys = gdf_polys.set_crs(epsg=4326)\n", " return gdf_polys" @@ -417,7 +456,7 @@ ], "source": [ "exp_nl_poly = Exposures(gdf_poly())\n", - "exp_nl_poly.gdf['impf_WS'] = 1\n", + "exp_nl_poly.gdf[\"impf_WS\"] = 1\n", "exp_nl_poly.gdf.head()" ] }, @@ -456,7 +495,7 @@ ], "source": [ "# take a look\n", - "exp_nl_poly.gdf.plot('value', legend=True, cmap='OrRd')" + "exp_nl_poly.gdf.plot(\"value\", legend=True, cmap=\"OrRd\")" ] }, { @@ -557,9 +596,13 @@ ], "source": [ "imp_deg = u_lp.calc_geom_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " res=0.005, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=0.005,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -621,9 +664,14 @@ ], "source": [ "imp_m = u_lp.calc_geom_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " res=500, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=500,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -711,15 +759,14 @@ }, "outputs": [], "source": [ - "#regular grid from exposures bounds\n", + "# regular grid from exposures bounds\n", "import climada.util.coordinates as u_coord\n", + "\n", "res = 0.1\n", "(_, _, xmax, ymax) = exp_nl_poly.gdf.geometry.bounds.max()\n", "(xmin, ymin, _, _) = exp_nl_poly.gdf.geometry.bounds.min()\n", "bounds = (xmin, ymin, xmax, ymax)\n", - "height, width, trafo = u_coord.pts_to_raster_meta(\n", - " bounds, (res, res)\n", - " )\n", + "height, width, trafo = u_coord.pts_to_raster_meta(bounds, (res, res))\n", "x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height)" ] }, @@ -747,9 +794,13 @@ ], "source": [ "imp_g = u_lp.calc_grid_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " grid=(x_grid, y_grid), disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " grid=(x_grid, y_grid),\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -931,8 +982,11 @@ "source": [ "# Disaggregate exposure to 10'000 metre grid, each point gets average value within polygon.\n", "exp_pnt = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=10000, to_meters=True,\n", - " disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n", + " exp_nl_poly,\n", + " res=10000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", ")\n", "exp_pnt.gdf.head()" ] @@ -1073,8 +1127,12 @@ "source": [ "# Disaggregate 
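The grid construction used below can be reused for any exposure; a sketch (assuming `exp_nl_poly` from this section, and using geopandas' `total_bounds` as a shortcut for the separate min/max calls):

```python
import climada.util.coordinates as u_coord

res = 0.1  # grid resolution in degrees
xmin, ymin, xmax, ymax = exp_nl_poly.gdf.total_bounds
height, width, trafo = u_coord.pts_to_raster_meta(
    (xmin, ymin, xmax, ymax), (res, res)
)
x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height)
print(x_grid.shape, y_grid.shape)  # one entry per grid point
```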
exposure to 0.1° grid, no value disaggregation specified --> replicate initial value\n", "exp_pnt2 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=0.1, to_meters=False,\n", - " disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None)\n", + " exp_nl_poly,\n", + " res=0.1,\n", + " to_meters=False,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=None,\n", + ")\n", "exp_pnt2.gdf.head()" ] }, @@ -1214,8 +1272,12 @@ "# Disaggregate exposure to 1'000 metre grid, each point gets value corresponding to\n", "# its representative area (1'000^2).\n", "exp_pnt3 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=1000, to_meters=True,\n", - " disagg_met=u_lp.DisaggMethod.FIX, disagg_val=10e6)\n", + " exp_nl_poly,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=10e6,\n", + ")\n", "exp_pnt3.gdf.head()" ] }, @@ -1355,8 +1417,12 @@ "# Disaggregate exposure to 1'000 metre grid, each point gets value corresponding to 1\n", "# After dissagregation, each point has a value equal to the percentage of area of the polygon\n", "exp_pnt4 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=1000, to_meters=True,\n", - " disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1)\n", + " exp_nl_poly,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=1,\n", + ")\n", "exp_pnt4.gdf.tail()" ] }, @@ -1494,19 +1560,18 @@ ], "source": [ "# disaggregate on pre-defined grid\n", - "#regular grid from exposures bounds\n", + "# regular grid from exposures bounds\n", "import climada.util.coordinates as u_coord\n", + "\n", "res = 0.1\n", "(_, _, xmax, ymax) = exp_nl_poly.gdf.geometry.bounds.max()\n", "(xmin, ymin, _, _) = exp_nl_poly.gdf.geometry.bounds.min()\n", "bounds = (xmin, ymin, xmax, ymax)\n", - "height, width, trafo = u_coord.pts_to_raster_meta(\n", - " bounds, (res, res)\n", - " )\n", + "height, width, trafo = u_coord.pts_to_raster_meta(bounds, (res, res))\n", "x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height)\n", "exp_pnt5 = u_lp.exp_geom_to_grid(\n", - " exp_nl_poly, grid=(x_grid, y_grid),\n", - " disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1)\n", + " exp_nl_poly, grid=(x_grid, y_grid), disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1\n", + ")\n", "exp_pnt5.gdf.tail()" ] }, @@ -1589,7 +1654,7 @@ ], "source": [ "# Plot point-impacts and aggregated impacts\n", - "imp_pnt.plot_hexbin_eai_exposure();\n", + "imp_pnt.plot_hexbin_eai_exposure()\n", "u_lp.plot_eai_exp_geom(imp_geom);" ] }, @@ -1727,7 +1792,7 @@ "outputs": [], "source": [ "def gdf_lines():\n", - " gdf_lines = gpd.read_file(Path(DEMO_DIR,'nl_rails.gpkg'))\n", + " gdf_lines = gpd.read_file(Path(DEMO_DIR, \"nl_rails.gpkg\"))\n", " gdf_lines = gdf_lines.to_crs(epsg=4326)\n", " return gdf_lines" ] @@ -1832,8 +1897,8 @@ ], "source": [ "exp_nl_lines = Exposures(gdf_lines())\n", - "exp_nl_lines.gdf['impf_WS'] = 1\n", - "exp_nl_lines.gdf['value'] = 1\n", + "exp_nl_lines.gdf[\"impf_WS\"] = 1\n", + "exp_nl_lines.gdf[\"value\"] = 1\n", "exp_nl_lines.gdf.head()" ] }, @@ -1861,7 +1926,7 @@ } ], "source": [ - "exp_nl_lines.gdf.plot('value', cmap='inferno');" + "exp_nl_lines.gdf.plot(\"value\", cmap=\"inferno\");" ] }, { @@ -1911,9 +1976,13 @@ ], "source": [ "imp_deg = u_lp.calc_geom_impact(\n", - " exp=exp_nl_lines, impf_set=impf_set, haz=storms,\n", - " res=0.005, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_lines,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=0.005,\n", + " 
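For orientation, the disaggregation recipes used in this tutorial differ only in two arguments; a condensed sketch (with `exp_nl_poly` as above):

```python
import climada.util.lines_polys_handler as u_lp

# value column divided among each polygon's points
exp_div = u_lp.exp_geom_to_pnt(
    exp_nl_poly,
    res=1000,
    to_meters=True,
    disagg_met=u_lp.DisaggMethod.DIV,
    disagg_val=None,
)
# value column replicated to every point
exp_fix = u_lp.exp_geom_to_pnt(
    exp_nl_poly,
    res=1000,
    to_meters=True,
    disagg_met=u_lp.DisaggMethod.FIX,
    disagg_val=None,
)
# fixed value 1.0 divided among points: each point holds its share of area
exp_share = u_lp.exp_geom_to_pnt(
    exp_nl_poly,
    res=1000,
    to_meters=True,
    disagg_met=u_lp.DisaggMethod.DIV,
    disagg_val=1.0,
)
```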
disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -1975,9 +2044,14 @@ ], "source": [ "imp_m = u_lp.calc_geom_impact(\n", - " exp=exp_nl_lines, impf_set=impf_set, haz=storms,\n", - " res=500, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_lines,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=500,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -2028,8 +2102,11 @@ ], "source": [ "import numpy as np\n", + "\n", "diff = np.max((imp_deg.eai_exp - imp_m.eai_exp) / imp_deg.eai_exp)\n", - "print(f\"The largest relative different between degrees and meters impact in this example is {diff}\")" + "print(\n", + " f\"The largest relative different between degrees and meters impact in this example is {diff}\"\n", + ")" ] }, { @@ -2184,7 +2261,11 @@ "source": [ "# 0.1° distance between points, average value disaggregation\n", "exp_pnt = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=0.1, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n", + " exp_nl_lines,\n", + " res=0.1,\n", + " to_meters=False,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", ")\n", "exp_pnt.gdf.head()" ] @@ -2317,7 +2398,11 @@ "source": [ "# 1000m distance between points, no value disaggregation\n", "exp_pnt2 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None\n", + " exp_nl_lines,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=None,\n", ")\n", "exp_pnt2.gdf.head()" ] @@ -2450,7 +2535,11 @@ "source": [ "# 1000m distance between points, equal value disaggregation\n", "exp_pnt3 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n", + " exp_nl_lines,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", ")\n", "exp_pnt3.gdf.head()" ] @@ -2583,7 +2672,11 @@ "source": [ "# 1000m distance between points, disaggregation of value according to representative distance\n", "exp_pnt4 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=1000\n", + " exp_nl_lines,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=1000,\n", ")\n", "exp_pnt4.gdf.head()" ] diff --git a/doc/tutorial/climada_entity_ImpactFuncSet.ipynb b/doc/tutorial/climada_entity_ImpactFuncSet.ipynb index 2702aa60f8..6df482925f 100644 --- a/doc/tutorial/climada_entity_ImpactFuncSet.ipynb +++ b/doc/tutorial/climada_entity_ImpactFuncSet.ipynb @@ -113,7 +113,7 @@ ")\n", "\n", "# check if the all the attributes are set correctly\n", - "imp_fun.check()\n" + "imp_fun.check()" ] }, { @@ -131,7 +131,7 @@ ], "source": [ "# Calculate the mdr at hazard intensity 18.7 m/s\n", - "print('Mean damage ratio at intensity 18.7 m/s: ', imp_fun.calc_mdr(18.7))" + "print(\"Mean damage ratio at intensity 18.7 m/s: \", imp_fun.calc_mdr(18.7))" ] }, { @@ -282,7 +282,7 @@ "imp_fun_3.check()\n", "\n", "# add the 2 impact functions into ImpactFuncSet\n", - "imp_fun_set = ImpactFuncSet([imp_fun_1, imp_fun_3])\n" + "imp_fun_set = ImpactFuncSet([imp_fun_1, imp_fun_3])" ] }, { @@ -345,7 +345,7 @@ ], "source": [ "# extract the TC impact function with id 1\n", - "impf_tc_1 
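The `imp_fun` checked below is built from three aligned arrays; a from-scratch sketch, with constructor keywords as in current CLIMADA versions:

```python
import numpy as np

from climada.entity import ImpactFunc

imp_fun = ImpactFunc(
    haz_type="TC",
    id=3,
    intensity=np.linspace(0, 100, 15),  # hazard intensity [m/s]
    mdd=np.concatenate((np.zeros(5), np.linspace(0, 1, 10))),  # mean damage degree
    paa=np.ones(15),  # percentage of affected assets
)
imp_fun.check()
print(imp_fun.calc_mdr(18.7))  # mdr(i) = mdd(i) * paa(i), interpolated
```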
= imp_fun_set.get_func('TC', 1)\n", + "impf_tc_1 = imp_fun_set.get_func(\"TC\", 1)\n", "# plot the impact function\n", "impf_tc_1.plot();" ] @@ -404,7 +404,7 @@ ], "source": [ "# removing the TC impact function with id 3\n", - "imp_fun_set.remove_func('TC', 3)\n", + "imp_fun_set.remove_func(\"TC\", 3)\n", "# plot all the remaining impact functions in imp_fun_set\n", "imp_fun_set.plot();" ] @@ -464,7 +464,7 @@ "# plot all the impact functions from the ImpactFuncSet\n", "imp_set_xlsx.plot()\n", "# adjust the plots\n", - "plt.subplots_adjust(right=1., top=4., hspace=0.4, wspace=0.4)" + "plt.subplots_adjust(right=1.0, top=4.0, hspace=0.4, wspace=0.4)" ] }, { @@ -483,7 +483,7 @@ "outputs": [], "source": [ "# write imp_set_xlsx into an excel file\n", - "imp_set_xlsx.write_excel('tutorial_impf_set.xlsx')" + "imp_set_xlsx.write_excel(\"tutorial_impf_set.xlsx\")" ] }, { @@ -512,7 +512,7 @@ "from climada.util.save import save\n", "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_impf_set.p', imp_set_xlsx)" + "save(\"tutorial_impf_set.p\", imp_set_xlsx)" ] }, { @@ -563,7 +563,7 @@ "# plot all the impact functions\n", "imp_fun_set_TC.plot()\n", "# adjust the plots\n", - "plt.subplots_adjust(right=1., top=4., hspace=0.4, wspace=0.4)" + "plt.subplots_adjust(right=1.0, top=4.0, hspace=0.4, wspace=0.4)" ] } ], diff --git a/doc/tutorial/climada_entity_LitPop.ipynb b/doc/tutorial/climada_entity_LitPop.ipynb index 8625fe394c..56c2d065a0 100644 --- a/doc/tutorial/climada_entity_LitPop.ipynb +++ b/doc/tutorial/climada_entity_LitPop.ipynb @@ -155,15 +155,19 @@ "source": [ "# Initiate a default LitPop exposure entity for Switzerland and Liechtenstein (ISO3-Codes 'CHE' and 'LIE'):\n", "try:\n", - " exp = LitPop.from_countries(['CHE', 'Liechtenstein']) # you can provide either single countries or a list of countries\n", + " exp = LitPop.from_countries(\n", + " [\"CHE\", \"Liechtenstein\"]\n", + " ) # you can provide either single countries or a list of countries\n", "except FileExistsError as err:\n", - " print(\"Reason for error: The GPW population data has not been downloaded, c.f. section 'Input data' above.\")\n", + " print(\n", + " \"Reason for error: The GPW population data has not been downloaded, c.f. 
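A compact sketch of the Excel round trip for impact function sets (assuming the bundled entity template as input, as elsewhere in the tutorials):

```python
from climada.entity import ImpactFuncSet
from climada.util import ENT_TEMPLATE_XLS

imp_set = ImpactFuncSet.from_excel(ENT_TEMPLATE_XLS)
imp_set.check()
imp_set.write_excel("tutorial_impf_set.xlsx")
imp_set_reloaded = ImpactFuncSet.from_excel("tutorial_impf_set.xlsx")
```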
section 'Input data' above.\"\n", + " )\n", " raise err\n", - "exp.plot_scatter();\n", + "exp.plot_scatter()\n", "\n", "# Note that `exp.gdf['region_id']` is a number identifying each country:\n", - "print('\\n Region IDs (`region_id`) in this exposure:')\n", - "print(exp.gdf['region_id'].unique())" + "print(\"\\n Region IDs (`region_id`) in this exposure:\")\n", + "print(exp.gdf[\"region_id\"].unique())" ] }, { @@ -240,9 +244,12 @@ ], "source": [ "# Initiate a LitPop exposure entity for Costa Rica with varied resolution, fin_mode, and exponents:\n", - "exp = LitPop.from_countries('Costa Rica', fin_mode='income_group', res_arcsec=120, exponents=(1,1)) # change the parameters and see what happens...\n", + "exp = LitPop.from_countries(\n", + " \"Costa Rica\", fin_mode=\"income_group\", res_arcsec=120, exponents=(1, 1)\n", + ") # change the parameters and see what happens...\n", "# exp = LitPop.from_countries('Costa Rica', fin_mode='gdp', res_arcsec=90, exponents=(3,0)) # example of variation\n", - "exp.plot_raster(); # note the log scale of the colorbar\n", + "exp.plot_raster()\n", + "# note the log scale of the colorbar\n", "exp.plot_scatter();" ] }, @@ -312,12 +319,16 @@ "source": [ "# You may want to check if you have downloaded dataset Gridded Population of the World (GPW), v4: Population Count, v4.11\n", "# (2000 and 2020) first\n", - "pop_2000 = LitPop.from_countries('CHE', fin_mode='pop', res_arcsec=300, exponents=(0,1), reference_year=2000)\n", + "pop_2000 = LitPop.from_countries(\n", + " \"CHE\", fin_mode=\"pop\", res_arcsec=300, exponents=(0, 1), reference_year=2000\n", + ")\n", "# Alternatively, we ca use `from_population`:\n", - "pop_2021 = LitPop.from_population(countries='Switzerland', res_arcsec=300, reference_year=2021)\n", + "pop_2021 = LitPop.from_population(\n", + " countries=\"Switzerland\", res_arcsec=300, reference_year=2021\n", + ")\n", "# Since no population data for 2021 is available, the closest data point, 2020, is used (see LOGGER.warning)\n", - "pop_2000.plot_scatter();\n", - "pop_2021.plot_scatter();\n", + "pop_2000.plot_scatter()\n", + "pop_2021.plot_scatter()\n", "\"\"\"Note the difference in total values on the color bar.\"\"\"" ] }, @@ -398,16 +409,18 @@ } ], "source": [ - "res = 30 # If you don't get an output after a very long time with country = \"MEX\", try with res = 100\n", - "country = 'JAM' # Try different countries, i.e. 'JAM', 'CHE', 'RWA', 'MEX'\n", - "markersize = 4 # for plotting\n", - "buffer_deg=.04\n", - "\n", - "exp_nightlights = LitPop.from_nightlight_intensity(countries=country, res_arcsec=res) # nightlight intensity\n", - "exp_nightlights.plot_hexbin(linewidth=markersize, buffer=buffer_deg);\n", + "res = 30 # If you don't get an output after a very long time with country = \"MEX\", try with res = 100\n", + "country = \"JAM\" # Try different countries, i.e. 
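The `exponents` pair steers the weighting of nightlights (first entry) and population (second entry). A sketch of the three standard choices, assuming the GPW population data is available locally as discussed above:

```python
from climada.entity import LitPop

exp_litpop = LitPop.from_countries("CHE", res_arcsec=120, exponents=(1, 1))
exp_pop = LitPop.from_countries(
    "CHE", res_arcsec=120, exponents=(0, 1), fin_mode="pop"
)
exp_light = LitPop.from_countries("CHE", res_arcsec=120, exponents=(1, 0))
for exp in (exp_litpop, exp_pop, exp_light):
    print(exp.gdf["value"].sum())
```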
'JAM', 'CHE', 'RWA', 'MEX'\n", + "markersize = 4 # for plotting\n", + "buffer_deg = 0.04\n", + "\n", + "exp_nightlights = LitPop.from_nightlight_intensity(\n", + " countries=country, res_arcsec=res\n", + ") # nightlight intensity\n", + "exp_nightlights.plot_hexbin(linewidth=markersize, buffer=buffer_deg)\n", "# Compare to the population map:\n", "exp_population = LitPop().from_population(countries=country, res_arcsec=res)\n", - "exp_population.plot_hexbin(linewidth=markersize, buffer=buffer_deg);\n", + "exp_population.plot_hexbin(linewidth=markersize, buffer=buffer_deg)\n", "# Compare to default LitPop exposures:\n", "exp = LitPop.from_countries(countries=country, res_arcsec=res)\n", "exp.plot_hexbin(linewidth=markersize, buffer=buffer_deg);" @@ -495,29 +508,31 @@ "import climada.util.coordinates as u_coord\n", "import climada.entity.exposures.litpop as lp\n", "\n", - "country_iso3a = 'USA'\n", - "state_name = 'Florida'\n", + "country_iso3a = \"USA\"\n", + "state_name = \"Florida\"\n", "reslution_arcsec = 600\n", "\"\"\"First, we need to get the shape of Florida:\"\"\"\n", "admin1_info, admin1_shapes = u_coord.get_admin1_info(country_iso3a)\n", "admin1_info = admin1_info[country_iso3a]\n", "admin1_shapes = admin1_shapes[country_iso3a]\n", - "admin1_names = [record['name'] for record in admin1_info]\n", + "admin1_names = [record[\"name\"] for record in admin1_info]\n", "print(admin1_names)\n", "for idx, name in enumerate(admin1_names):\n", - " if admin1_names[idx]==state_name:\n", + " if admin1_names[idx] == state_name:\n", " break\n", - "print('Florida index: ' + str(idx))\n", + "print(\"Florida index: \" + str(idx))\n", "\n", "\"\"\"Secondly, we estimate the `total_value`\"\"\"\n", "# `total_value` required user input for `from_shape`, here we assume 5% of total value of the whole USA:\n", - "total_value = 0.05 * lp._get_total_value_per_country(country_iso3a, 'pc', 2020)\n", + "total_value = 0.05 * lp._get_total_value_per_country(country_iso3a, \"pc\", 2020)\n", "\n", "\"\"\"Then, we can initiate the exposures for Florida:\"\"\"\n", "start = time.process_time()\n", - "exp = LitPop.from_shape(admin1_shapes[idx], total_value, res_arcsec=600, reference_year=2020)\n", - "print(f'\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter(vmin=100, buffer=.5);\n" + "exp = LitPop.from_shape(\n", + " admin1_shapes[idx], total_value, res_arcsec=600, reference_year=2020\n", + ")\n", + "print(f\"\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp.plot_scatter(vmin=100, buffer=0.5);" ] }, { @@ -561,9 +576,13 @@ "# `from_shape_and_countries` does not require `total_value`, but is slower to compute than `from_shape`,\n", "# because first, the exposure for the whole USA is initiated:\n", "start = time.process_time()\n", - "exp = LitPop.from_shape_and_countries(admin1_shapes[idx], country_iso3a, res_arcsec=600, reference_year=2020)\n", - "print(f'\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter(vmin=100, buffer=.5);\n", + "exp = LitPop.from_shape_and_countries(\n", + " admin1_shapes[idx], country_iso3a, res_arcsec=600, reference_year=2020\n", + ")\n", + "print(\n", + " f\"\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n\"\n", + ")\n", + "exp.plot_scatter(vmin=100, buffer=0.5)\n", "\"\"\"Note the differences in computational speed and total value between the two approaches\"\"\"" ] }, @@ -655,31 +674,36 @@ "from shapely.geometry import 
Polygon\n", "\n", "\"\"\"initiate LitPop exposures for a geographical box around the city of Zurich:\"\"\"\n", - "bounds = (8.41, 47.25, 8.70, 47.47) # (min_lon, max_lon, min_lat, max_lat)\n", - "total_value=1000 # required user input for `from_shape`, here we just assume USD 1000 of total value\n", - "shape = Polygon([\n", - " (bounds[0], bounds[3]),\n", - " (bounds[2], bounds[3]),\n", - " (bounds[2], bounds[1]),\n", - " (bounds[0], bounds[1])\n", - " ])\n", + "bounds = (8.41, 47.25, 8.70, 47.47) # (min_lon, max_lon, min_lat, max_lat)\n", + "total_value = 1000 # required user input for `from_shape`, here we just assume USD 1000 of total value\n", + "shape = Polygon(\n", + " [\n", + " (bounds[0], bounds[3]),\n", + " (bounds[2], bounds[3]),\n", + " (bounds[2], bounds[1]),\n", + " (bounds[0], bounds[1]),\n", + " ]\n", + ")\n", "import time\n", + "\n", "start = time.process_time()\n", "exp = LitPop.from_shape(shape, total_value)\n", - "print(f'\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter();\n", + "print(f\"\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp.plot_scatter()\n", "# `from_shape_and_countries` does not require `total_value`, but is slower to compute:\n", "start = time.process_time()\n", - "exp = LitPop.from_shape_and_countries(shape, 'Switzerland')\n", - "print(f'\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter();\n", + "exp = LitPop.from_shape_and_countries(shape, \"Switzerland\")\n", + "print(\n", + " f\"\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n\"\n", + ")\n", + "exp.plot_scatter()\n", "\"\"\"Note the difference in total value between the two exposure sets!\"\"\"\n", "\n", "\"\"\"For comparison, initiate population exposure for a geographical box around the city of Zurich:\"\"\"\n", "start = time.process_time()\n", "exp_pop = LitPop.from_population(shape=shape)\n", - "print(f'\\n Runtime `from_population` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp_pop.plot_scatter();\n", + "print(f\"\\n Runtime `from_population` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp_pop.plot_scatter()\n", "\n", "\"\"\"Population exposure for a custom shape can be initiated directly via `set_population` without providing `total_value`\"\"\"" ] @@ -727,14 +751,18 @@ "source": [ "# Initiate GDP-Entity for Switzerland, with and without admin1_calc:\n", "\n", - "ent_adm0 = LitPop.from_countries('CHE', res_arcsec=120, fin_mode='gdp', admin1_calc=False)\n", + "ent_adm0 = LitPop.from_countries(\n", + " \"CHE\", res_arcsec=120, fin_mode=\"gdp\", admin1_calc=False\n", + ")\n", "ent_adm0.set_geometry_points()\n", "\n", - "ent_adm1 = LitPop.from_countries('CHE', res_arcsec=120, fin_mode='gdp', admin1_calc=True)\n", + "ent_adm1 = LitPop.from_countries(\n", + " \"CHE\", res_arcsec=120, fin_mode=\"gdp\", admin1_calc=True\n", + ")\n", "\n", "ent_adm0.check()\n", "ent_adm1.check()\n", - "print('Done.')" + "print(\"Done.\")" ] }, { @@ -788,14 +816,15 @@ "source": [ "# Plotting:\n", "from matplotlib import colors\n", - "norm=colors.LogNorm(vmin=1e5, vmax=1e9) # setting range for the log-normal scale\n", + "\n", + "norm = colors.LogNorm(vmin=1e5, vmax=1e9) # setting range for the log-normal scale\n", "markersize = 5\n", - "ent_adm0.plot_hexbin(buffer=.3, norm=norm, linewidth=markersize);\n", - "ent_adm1.plot_hexbin(buffer=.3, norm=norm, linewidth=markersize);\n", + "ent_adm0.plot_hexbin(buffer=0.3, norm=norm, 
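To make the total-value remark concrete, a sketch comparing the two shape-based constructors on the same bounding box (total value of 1000 as assumed above):

```python
from shapely.geometry import box

from climada.entity import LitPop

# Zurich bounding box; box takes (min_lon, min_lat, max_lon, max_lat)
shape = box(8.41, 47.25, 8.70, 47.47)
exp_fixed = LitPop.from_shape(shape, total_value=1000)
exp_scaled = LitPop.from_shape_and_countries(shape, "Switzerland")
print(exp_fixed.gdf["value"].sum())  # exactly the user-set total of 1000
print(exp_scaled.gdf["value"].sum())  # the box's share of the Swiss total
```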
linewidth=markersize)\n", + "ent_adm1.plot_hexbin(buffer=0.3, norm=norm, linewidth=markersize)\n", "\n", - "print('admin-0: First figure')\n", - "print('admin-1: Second figure')\n", - "'''Do you spot the small differences in Graubünden (eastern Switzerland)?'''" + "print(\"admin-0: First figure\")\n", + "print(\"admin-1: Second figure\")\n", + "\"\"\"Do you spot the small differences in Graubünden (eastern Switzerland)?\"\"\"" ] } ], diff --git a/doc/tutorial/climada_entity_MeasureSet.ipynb b/doc/tutorial/climada_entity_MeasureSet.ipynb index e1b93a1035..812198362b 100644 --- a/doc/tutorial/climada_entity_MeasureSet.ipynb +++ b/doc/tutorial/climada_entity_MeasureSet.ipynb @@ -127,28 +127,28 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " mdd_impact=(1, 0),\n", " paa_impact=(1, -0.15),\n", - " hazard_inten_imp=(1, -10), # reduces intensity by 10\n", + " hazard_inten_imp=(1, -10), # reduces intensity by 10\n", ")\n", "\n", "# impact functions\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "impf_all = ImpactFuncSet([impf_tc])\n", - "impf_all.plot();\n", + "impf_all.plot()\n", "\n", "# dummy Hazard and Exposures\n", - "haz = Hazard('TC') # this measure does not change hazard\n", - "exp = Exposures() # this measure does not change exposures\n", + "haz = Hazard(\"TC\") # this measure does not change hazard\n", + "exp = Exposures() # this measure does not change exposures\n", "\n", "# new impact functions\n", "new_exp, new_impfs, new_haz = meas.apply(exp, impf_all, haz)\n", - "axes = new_impfs.plot();\n", - "axes.set_title('TC: Modified impact function')" + "axes = new_impfs.plot()\n", + "axes.set_title(\"TC: Modified impact function\")" ] }, { @@ -228,8 +228,8 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " hazard_freq_cutoff=0.0255,\n", @@ -250,14 +250,16 @@ "# new hazard\n", "new_exp, new_impfs, new_haz = meas.apply(exp, impf_all, haz)\n", "# if you look at the maximum intensity per centroid: new_haz does not contain the event with smaller impact (the most frequent)\n", - "haz.plot_intensity(0);\n", - "new_haz.plot_intensity(0);\n", + "haz.plot_intensity(0)\n", + "new_haz.plot_intensity(0)\n", "# you might also compute the exceedance frequency curve of both hazard\n", "imp = ImpactCalc(exp, impf_all, haz).impact()\n", - "ax = imp.calc_freq_curve().plot(label='original');\n", + "ax = imp.calc_freq_curve().plot(label=\"original\")\n", "\n", "new_imp = ImpactCalc(new_exp, new_impfs, new_haz).impact()\n", - "new_imp.calc_freq_curve().plot(axis=ax, label='measure'); # the damages for events with return periods > 1/0.0255 ~ 40 are 0" + "new_imp.calc_freq_curve().plot(\n", + " axis=ax, label=\"measure\"\n", + "); # the damages for events with return periods > 1/0.0255 ~ 40 are 0" ] }, { @@ -361,12 +363,12 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Building code',\n", - " haz_type='TC',\n", + " name=\"Building code\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " hazard_freq_cutoff=0.00455,\n", - " exp_region_id=[1], # apply measure to points close to exposures with region_id=1\n", + " exp_region_id=[1], # apply measure to points close to exposures with region_id=1\n", ")\n", "\n", "# impact functions\n", @@ 
-379,7 +381,7 @@ "\n", "# Exposures\n", "exp = Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#exp['region_id'] = np.ones(exp.shape[0])\n", + "# exp['region_id'] = np.ones(exp.shape[0])\n", "exp.check()\n", "# all exposures have region_id=1\n", "exp.plot_hexbin(buffer=1.0)\n", @@ -449,8 +451,8 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Insurance',\n", - " haz_type='TC',\n", + " name=\"Insurance\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " risk_transf_attach=5.0e8,\n", @@ -471,12 +473,12 @@ "\n", "# impact before\n", "imp = ImpactCalc(exp, impf_all, haz).impact()\n", - "ax = imp.calc_freq_curve().plot(label='original');\n", + "ax = imp.calc_freq_curve().plot(label=\"original\")\n", "\n", "# impact after. risk_transf will be added to the cost of the measure\n", "imp_new, risk_transf = meas.calc_impact(exp, impf_all, haz)\n", - "imp_new.calc_freq_curve().plot(axis=ax, label='measure');\n", - "print('risk_transfer {:.3}'.format(risk_transf.aai_agg))" + "imp_new.calc_freq_curve().plot(axis=ax, label=\"measure\")\n", + "print(\"risk_transfer {:.3}\".format(risk_transf.aai_agg))" ] }, { @@ -515,8 +517,8 @@ "from climada.entity.measures import Measure, MeasureSet\n", "\n", "meas_1 = Measure(\n", - " haz_type='TC',\n", - " name='Mangrove',\n", + " haz_type=\"TC\",\n", + " name=\"Mangrove\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " mdd_impact=(1, 2),\n", @@ -526,8 +528,8 @@ ")\n", "\n", "meas_2 = Measure(\n", - " haz_type='TC',\n", - " name='Sandbags',\n", + " haz_type=\"TC\",\n", + " name=\"Sandbags\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=22000000,\n", " mdd_impact=(1, 2),\n", @@ -543,7 +545,7 @@ "meas_set.check()\n", "\n", "# select one measure\n", - "meas_sel = meas_set.get_measure(name='Sandbags')\n", + "meas_sel = meas_set.get_measure(name=\"Sandbags\")\n", "print(meas_sel[0].name, meas_sel[0].cost)" ] }, @@ -582,7 +584,7 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "meas_set = MeasureSet.from_excel(file_name)\n", "meas_set" ] @@ -611,11 +613,11 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "meas_set = MeasureSet.from_excel(file_name)\n", "\n", "# write file\n", - "meas_set.write_excel('results/tutorial_meas_set.xlsx')" + "meas_set.write_excel(\"results/tutorial_meas_set.xlsx\")" ] }, { @@ -638,8 +640,9 @@ "outputs": [], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_meas_set.p', meas_set)" + "save(\"tutorial_meas_set.p\", meas_set)" ] } ], diff --git a/doc/tutorial/climada_hazard_Hazard.ipynb b/doc/tutorial/climada_hazard_Hazard.ipynb index 94dd517dc5..19cc98a0f7 100644 --- a/doc/tutorial/climada_hazard_Hazard.ipynb +++ b/doc/tutorial/climada_hazard_Hazard.ipynb @@ -95,27 +95,33 @@ "import numpy as np\n", "from climada.hazard import Hazard\n", "from climada.util.constants import HAZ_DEMO_FL\n", + "\n", "# to hide the warnings\n", "import warnings\n", - "warnings.filterwarnings('ignore')\n", "\n", - "# read intensity from raster file HAZ_DEMO_FL and set frequency for the contained 
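Risk-transfer measures follow the same pattern as the other measures; a minimal sketch of the insurance example (assuming `exp`, `impf_all` and `haz` as defined above; the cover value is an assumption for illustration):

```python
import numpy as np

from climada.entity.measures import Measure

meas = Measure(
    name="Insurance",
    haz_type="TC",
    color_rgb=np.array([1, 1, 1]),
    cost=500000000,
    risk_transf_attach=5.0e8,  # damages below this stay with the insured
    risk_transf_cover=1.0e9,  # assumed cover above the attachment point
)
imp_residual, risk_transf = meas.calc_impact(exp, impf_all, haz)
print("transferred annual average impact: {:.3}".format(risk_transf.aai_agg))
```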
event\n", - "haz_ven = Hazard.from_raster([HAZ_DEMO_FL], attrs={'frequency':np.ones(1)/2}, haz_type='FL')\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "# read intensity from raster file HAZ_DEMO_FL and set frequency for the contained event\n", + "haz_ven = Hazard.from_raster(\n", + " [HAZ_DEMO_FL], attrs={\"frequency\": np.ones(1) / 2}, haz_type=\"FL\"\n", + ")\n", "haz_ven.check()\n", "\n", "# The masked values of the raster are set to 0\n", "# Sometimes the raster file does not contain all the information, as in this case the mask value -9999\n", "# We mask it manuall and plot it using plot_intensity()\n", - "haz_ven.intensity[haz_ven.intensity==-9999] = 0\n", - "haz_ven.plot_intensity(1, smooth=False) # if smooth=True (default value) is used, the computation time might increase\n", + "haz_ven.intensity[haz_ven.intensity == -9999] = 0\n", + "haz_ven.plot_intensity(\n", + " 1, smooth=False\n", + ") # if smooth=True (default value) is used, the computation time might increase\n", "\n", "# per default the following attributes have been set\n", - "print('event_id: ', haz_ven.event_id)\n", - "print('event_name: ', haz_ven.event_name)\n", - "print('date: ', haz_ven.date)\n", - "print('frequency: ', haz_ven.frequency)\n", - "print('orig: ', haz_ven.orig)\n", - "print('min, max fraction: ', haz_ven.fraction.min(), haz_ven.fraction.max())" + "print(\"event_id: \", haz_ven.event_id)\n", + "print(\"event_name: \", haz_ven.event_name)\n", + "print(\"date: \", haz_ven.date)\n", + "print(\"frequency: \", haz_ven.frequency)\n", + "print(\"orig: \", haz_ven.orig)\n", + "print(\"min, max fraction: \", haz_ven.fraction.min(), haz_ven.fraction.max())" ] }, { @@ -135,10 +141,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Put your code here\n", - "\n", - "\n", - "\n" + "# Put your code here" ] }, { @@ -212,30 +215,42 @@ "# Solution:\n", "\n", "# 1. The CRS can be reprojected using dst_crs option\n", - "haz = Hazard.from_raster([HAZ_DEMO_FL], dst_crs='epsg:2201', haz_type='FL')\n", + "haz = Hazard.from_raster([HAZ_DEMO_FL], dst_crs=\"epsg:2201\", haz_type=\"FL\")\n", "haz.check()\n", - "print('\\n Solution 1:')\n", - "print('centroids CRS:', haz.centroids.crs)\n", - "print('raster info:', haz.centroids.get_meta())\n", + "print(\"\\n Solution 1:\")\n", + "print(\"centroids CRS:\", haz.centroids.crs)\n", + "print(\"raster info:\", haz.centroids.get_meta())\n", "\n", "# 2. Transformations of the coordinates can be set using the transform option and Affine\n", "from rasterio import Affine\n", - "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL',\n", - " transform=Affine(0.009000000000000341, 0.0, -69.33714959699981, \\\n", - " 0.0, -0.009000000000000341, 10.42822096697894),\n", - " height=500, width=501)\n", + "\n", + "haz = Hazard.from_raster(\n", + " [HAZ_DEMO_FL],\n", + " haz_type=\"FL\",\n", + " transform=Affine(\n", + " 0.009000000000000341,\n", + " 0.0,\n", + " -69.33714959699981,\n", + " 0.0,\n", + " -0.009000000000000341,\n", + " 10.42822096697894,\n", + " ),\n", + " height=500,\n", + " width=501,\n", + ")\n", "haz.check()\n", - "print('\\n Solution 2:')\n", - "print('raster info:', haz.centroids.get_meta())\n", - "print('intensity size:', haz.intensity.shape)\n", + "print(\"\\n Solution 2:\")\n", + "print(\"raster info:\", haz.centroids.get_meta())\n", + "print(\"intensity size:\", haz.intensity.shape)\n", "\n", "# 3. 
A partial part of the raster can be loaded using the window or geometry\n", "from rasterio.windows import Window\n", - "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL', window=Window(10, 10, 20, 30))\n", + "\n", + "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type=\"FL\", window=Window(10, 10, 20, 30))\n", "haz.check()\n", - "print('\\n Solution 3:')\n", - "print('raster info:', haz.centroids.get_meta())\n", - "print('intensity size:', haz.intensity.shape)" + "print(\"\\n Solution 3:\")\n", + "print(\"raster info:\", haz.centroids.get_meta())\n", + "print(\"intensity size:\", haz.intensity.shape)" ] }, { @@ -266,10 +281,13 @@ ], "source": [ "from climada.hazard import Hazard, Centroids\n", - "from climada.util import HAZ_DEMO_H5 # CLIMADA's Python file\n", + "from climada.util import HAZ_DEMO_H5 # CLIMADA's Python file\n", + "\n", "# Hazard needs to know the acronym of the hazard type to be constructed!!! Use 'NA' if not known.\n", - "haz_tc_fl = Hazard.from_hdf5(HAZ_DEMO_H5) # Historic tropical cyclones in Florida from 1990 to 2004\n", - "haz_tc_fl.check() # Use always the check() method to see if the hazard has been loaded correctly" + "haz_tc_fl = Hazard.from_hdf5(\n", + " HAZ_DEMO_H5\n", + ") # Historic tropical cyclones in Florida from 1990 to 2004\n", + "haz_tc_fl.check() # Use always the check() method to see if the hazard has been loaded correctly" ] }, { @@ -298,50 +316,152 @@ } ], "source": [ - "# setting points\n", + "# setting points\n", "import numpy as np\n", "from scipy import sparse\n", "\n", - "lat = np.array([26.933899, 26.957203, 26.783846, 26.645524, 26.897796, 26.925359, \\\n", - " 26.914768, 26.853491, 26.845099, 26.82651 , 26.842772, 26.825905, \\\n", - " 26.80465 , 26.788649, 26.704277, 26.71005 , 26.755412, 26.678449, \\\n", - " 26.725649, 26.720599, 26.71255 , 26.6649 , 26.664699, 26.663149, \\\n", - " 26.66875 , 26.638517, 26.59309 , 26.617449, 26.620079, 26.596795, \\\n", - " 26.577049, 26.524585, 26.524158, 26.523737, 26.520284, 26.547349, \\\n", - " 26.463399, 26.45905 , 26.45558 , 26.453699, 26.449999, 26.397299, \\\n", - " 26.4084 , 26.40875 , 26.379113, 26.3809 , 26.349068, 26.346349, \\\n", - " 26.348015, 26.347957])\n", - "\n", - "lon = np.array([-80.128799, -80.098284, -80.748947, -80.550704, -80.596929, \\\n", - " -80.220966, -80.07466 , -80.190281, -80.083904, -80.213493, \\\n", - " -80.0591 , -80.630096, -80.075301, -80.069885, -80.656841, \\\n", - " -80.190085, -80.08955 , -80.041179, -80.1324 , -80.091746, \\\n", - " -80.068579, -80.090698, -80.1254 , -80.151401, -80.058749, \\\n", - " -80.283371, -80.206901, -80.090649, -80.055001, -80.128711, \\\n", - " -80.076435, -80.080105, -80.06398 , -80.178973, -80.110519, \\\n", - " -80.057701, -80.064251, -80.07875 , -80.139247, -80.104316, \\\n", - " -80.188545, -80.21902 , -80.092391, -80.1575 , -80.102028, \\\n", - " -80.16885 , -80.116401, -80.08385 , -80.241305, -80.158855])\n", - "\n", - "n_cen = lon.size # number of centroids\n", - "n_ev = 10 # number of events\n", + "lat = np.array(\n", + " [\n", + " 26.933899,\n", + " 26.957203,\n", + " 26.783846,\n", + " 26.645524,\n", + " 26.897796,\n", + " 26.925359,\n", + " 26.914768,\n", + " 26.853491,\n", + " 26.845099,\n", + " 26.82651,\n", + " 26.842772,\n", + " 26.825905,\n", + " 26.80465,\n", + " 26.788649,\n", + " 26.704277,\n", + " 26.71005,\n", + " 26.755412,\n", + " 26.678449,\n", + " 26.725649,\n", + " 26.720599,\n", + " 26.71255,\n", + " 26.6649,\n", + " 26.664699,\n", + " 26.663149,\n", + " 26.66875,\n", + " 26.638517,\n", + " 26.59309,\n", + 
" 26.617449,\n", + " 26.620079,\n", + " 26.596795,\n", + " 26.577049,\n", + " 26.524585,\n", + " 26.524158,\n", + " 26.523737,\n", + " 26.520284,\n", + " 26.547349,\n", + " 26.463399,\n", + " 26.45905,\n", + " 26.45558,\n", + " 26.453699,\n", + " 26.449999,\n", + " 26.397299,\n", + " 26.4084,\n", + " 26.40875,\n", + " 26.379113,\n", + " 26.3809,\n", + " 26.349068,\n", + " 26.346349,\n", + " 26.348015,\n", + " 26.347957,\n", + " ]\n", + ")\n", + "\n", + "lon = np.array(\n", + " [\n", + " -80.128799,\n", + " -80.098284,\n", + " -80.748947,\n", + " -80.550704,\n", + " -80.596929,\n", + " -80.220966,\n", + " -80.07466,\n", + " -80.190281,\n", + " -80.083904,\n", + " -80.213493,\n", + " -80.0591,\n", + " -80.630096,\n", + " -80.075301,\n", + " -80.069885,\n", + " -80.656841,\n", + " -80.190085,\n", + " -80.08955,\n", + " -80.041179,\n", + " -80.1324,\n", + " -80.091746,\n", + " -80.068579,\n", + " -80.090698,\n", + " -80.1254,\n", + " -80.151401,\n", + " -80.058749,\n", + " -80.283371,\n", + " -80.206901,\n", + " -80.090649,\n", + " -80.055001,\n", + " -80.128711,\n", + " -80.076435,\n", + " -80.080105,\n", + " -80.06398,\n", + " -80.178973,\n", + " -80.110519,\n", + " -80.057701,\n", + " -80.064251,\n", + " -80.07875,\n", + " -80.139247,\n", + " -80.104316,\n", + " -80.188545,\n", + " -80.21902,\n", + " -80.092391,\n", + " -80.1575,\n", + " -80.102028,\n", + " -80.16885,\n", + " -80.116401,\n", + " -80.08385,\n", + " -80.241305,\n", + " -80.158855,\n", + " ]\n", + ")\n", + "\n", + "n_cen = lon.size # number of centroids\n", + "n_ev = 10 # number of events\n", "\n", "intensity = sparse.csr_matrix(np.random.random((n_ev, n_cen)))\n", "fraction = intensity.copy()\n", "fraction.data.fill(1)\n", "\n", - "haz = Hazard(haz_type='TC',\n", - " intensity=intensity,\n", - " fraction=fraction,\n", - " centroids=Centroids(lat=lat, lon=lon), # default crs used\n", - " units='m',\n", - " event_id=np.arange(n_ev, dtype=int),\n", - " event_name=['ev_12', 'ev_21', 'Maria', 'ev_35',\n", - " 'Irma', 'ev_16', 'ev_15', 'Edgar', 'ev_1', 'ev_9'],\n", - " date=np.array([721166, 734447, 734447, 734447, 721167,\n", - " 721166, 721167, 721200, 721166, 721166]),\n", - " orig=np.zeros(n_ev, bool),\n", - " frequency=np.ones(n_ev)/n_ev,)\n", + "haz = Hazard(\n", + " haz_type=\"TC\",\n", + " intensity=intensity,\n", + " fraction=fraction,\n", + " centroids=Centroids(lat=lat, lon=lon), # default crs used\n", + " units=\"m\",\n", + " event_id=np.arange(n_ev, dtype=int),\n", + " event_name=[\n", + " \"ev_12\",\n", + " \"ev_21\",\n", + " \"Maria\",\n", + " \"ev_35\",\n", + " \"Irma\",\n", + " \"ev_16\",\n", + " \"ev_15\",\n", + " \"Edgar\",\n", + " \"ev_1\",\n", + " \"ev_9\",\n", + " ],\n", + " date=np.array(\n", + " [721166, 734447, 734447, 734447, 721167, 721166, 721167, 721200, 721166, 721166]\n", + " ),\n", + " orig=np.zeros(n_ev, bool),\n", + " frequency=np.ones(n_ev) / n_ev,\n", + ")\n", "\n", "haz.check()\n", "haz.centroids.plot();" @@ -363,10 +483,17 @@ "# using from_pnt_bounds\n", "\n", "# bounds\n", - "left, bottom, right, top = -72, -3.0, -52.0, 22 # the bounds refer to the bounds of the center of the pixel\n", + "left, bottom, right, top = (\n", + " -72,\n", + " -3.0,\n", + " -52.0,\n", + " 22,\n", + ") # the bounds refer to the bounds of the center of the pixel\n", "# resolution\n", "res = 0.5\n", - "centroids = Centroids.from_pnt_bounds((left, bottom, right, top), res) # default crs used" + "centroids = Centroids.from_pnt_bounds(\n", + " (left, bottom, right, top), res\n", + ") # default crs used" ] }, { @@ -393,26 
+520,24 @@ "\n", "# raster info:\n", "# border upper left corner (of the pixel, not of the center of the pixel)\n", - "max_lat = top + res/2\n", - "min_lon = left - res/2\n", + "max_lat = top + res / 2\n", + "min_lon = left - res / 2\n", "# resolution in lat and lon\n", - "d_lat = -res # negative because starting in upper corner\n", - "d_lon = res # same step as d_lat\n", + "d_lat = -res # negative because starting in upper corner\n", + "d_lon = res # same step as d_lat\n", "# number of points\n", "n_lat, n_lon = centroids.shape\n", "\n", "# meta: raster specification\n", "meta = {\n", - " 'dtype': 'float32',\n", - " 'width': n_lon,\n", - " 'height': n_lat,\n", - " 'crs': DEF_CRS,\n", - " 'transform': rasterio.Affine(\n", - " a=d_lon, b=0.0, c=min_lon,\n", - " d=0.0, e=d_lat, f=max_lat),\n", + " \"dtype\": \"float32\",\n", + " \"width\": n_lon,\n", + " \"height\": n_lat,\n", + " \"crs\": DEF_CRS,\n", + " \"transform\": rasterio.Affine(a=d_lon, b=0.0, c=min_lon, d=0.0, e=d_lat, f=max_lat),\n", "}\n", "\n", - "centroids_from_meta = Centroids.from_meta(meta) # default crs used\n", + "centroids_from_meta = Centroids.from_meta(meta) # default crs used\n", "\n", "centroids_from_meta == centroids" ] @@ -446,27 +571,40 @@ "import numpy as np\n", "from scipy import sparse\n", "\n", - "n_ev = 10 # number of events\n", + "n_ev = 10 # number of events\n", "\n", "intensity = sparse.csr_matrix(np.random.random((n_ev, centroids.size)))\n", "fraction = intensity.copy()\n", "fraction.data.fill(1)\n", "\n", - "haz = Hazard('TC',\n", - " centroids=centroids,\n", - " intensity=intensity,\n", - " fraction=fraction,\n", - " units='m',\n", - " event_id=np.arange(n_ev, dtype=int),\n", - " event_name=['ev_12', 'ev_21', 'Maria', 'ev_35',\n", - " 'Irma', 'ev_16', 'ev_15', 'Edgar', 'ev_1', 'ev_9'],\n", - " date=np.array([721166, 734447, 734447, 734447, 721167,\n", - " 721166, 721167, 721200, 721166, 721166]),\n", - " orig=np.zeros(n_ev, bool),\n", - " frequency=np.ones(n_ev)/n_ev,)\n", + "haz = Hazard(\n", + " \"TC\",\n", + " centroids=centroids,\n", + " intensity=intensity,\n", + " fraction=fraction,\n", + " units=\"m\",\n", + " event_id=np.arange(n_ev, dtype=int),\n", + " event_name=[\n", + " \"ev_12\",\n", + " \"ev_21\",\n", + " \"Maria\",\n", + " \"ev_35\",\n", + " \"Irma\",\n", + " \"ev_16\",\n", + " \"ev_15\",\n", + " \"Edgar\",\n", + " \"ev_1\",\n", + " \"ev_9\",\n", + " ],\n", + " date=np.array(\n", + " [721166, 734447, 734447, 734447, 721167, 721166, 721167, 721200, 721166, 721166]\n", + " ),\n", + " orig=np.zeros(n_ev, bool),\n", + " frequency=np.ones(n_ev) / n_ev,\n", + ")\n", "\n", "haz.check()\n", - "print('Check centroids borders:', haz.centroids.total_bounds)\n", + "print(\"Check centroids borders:\", haz.centroids.total_bounds)\n", "haz.centroids.plot();" ] }, @@ -512,8 +650,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Put your code here:\n", - "\n" + "# Put your code here:" ] }, { @@ -522,7 +659,7 @@ "metadata": {}, "outputs": [], "source": [ - "#help(hist_tc.centroids) # If you want to run it, do it after you execute the next block" + "# help(hist_tc.centroids) # If you want to run it, do it after you execute the next block" ] }, { @@ -548,26 +685,26 @@ "# SOLUTION:\n", "\n", "# 1.How many synthetic events are contained?\n", - "print('Number of total events:', haz_tc_fl.size)\n", - "print('Number of synthetic events:', np.logical_not(haz_tc_fl.orig).astype(int).sum())\n", + "print(\"Number of total events:\", haz_tc_fl.size)\n", + "print(\"Number of synthetic events:\", 
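`select()` accepts its filters independently or combined; a quick sketch with the demo hazard `haz_tc_fl` loaded above:

```python
hist = haz_tc_fl.select(orig=True)  # historical events only
nineties = haz_tc_fl.select(date=("1990-01-01", "1999-12-31"))
both = haz_tc_fl.select(date=("1995-01-01", "2001-12-31"), orig=True)
print(hist.size, nineties.size, both.size)
```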
np.logical_not(haz_tc_fl.orig).astype(int).sum())\n", "\n", "# 2. Generate a hazard with historical hurricanes ocurring between 1995 and 2001.\n", - "hist_tc = haz_tc_fl.select(date=('1995-01-01', '2001-12-31'), orig=True)\n", - "print('Number of historical events between 1995 and 2001:', hist_tc.size)\n", + "hist_tc = haz_tc_fl.select(date=(\"1995-01-01\", \"2001-12-31\"), orig=True)\n", + "print(\"Number of historical events between 1995 and 2001:\", hist_tc.size)\n", "\n", "# 3. How many historical hurricanes occured in 1999? Which was the year with most hurricanes between 1995 and 2001?\n", - "ev_per_year = hist_tc.calc_year_set() # events ids per year\n", - "print('Number of events in 1999:', ev_per_year[1999].size)\n", + "ev_per_year = hist_tc.calc_year_set() # events ids per year\n", + "print(\"Number of events in 1999:\", ev_per_year[1999].size)\n", "max_year = 1995\n", "max_ev = ev_per_year[1995].size\n", "for year, ev in ev_per_year.items():\n", " if ev.size > max_ev:\n", " max_year = year\n", - "print('Year with most hurricanes between 1995 and 2001:', max_year)\n", + "print(\"Year with most hurricanes between 1995 and 2001:\", max_year)\n", "\n", - "# 4. What is the number of centroids with distance to coast smaller than 1km?\n", + "# 4. What is the number of centroids with distance to coast smaller than 1km?\n", "num_cen_coast = np.argwhere(hist_tc.centroids.get_dist_coast() < 1000).size\n", - "print('Number of centroids close to coast: ', num_cen_coast)" + "print(\"Number of centroids close to coast: \", num_cen_coast)" ] }, { @@ -745,14 +882,16 @@ ], "source": [ "# 1. intensities of the largest event (defined as greater sum of intensities):\n", - "# all events:\n", - "haz_tc_fl.plot_intensity(event=-1) # largest historical event: 1992230N11325 hurricane ANDREW\n", + "# all events:\n", + "haz_tc_fl.plot_intensity(\n", + " event=-1\n", + ") # largest historical event: 1992230N11325 hurricane ANDREW\n", "\n", "# 2. maximum intensities at each centroid:\n", "haz_tc_fl.plot_intensity(event=0)\n", "\n", "# 3. intensities of hurricane 1998295N12284:\n", - "haz_tc_fl.plot_intensity(event='1998295N12284', cmap='BuGn') # setting color map\n", + "haz_tc_fl.plot_intensity(event=\"1998295N12284\", cmap=\"BuGn\") # setting color map\n", "\n", "# 4. tropical cyclone intensities maps for the return periods [10, 50, 75, 100]\n", "_, res = haz_tc_fl.plot_rp_intensity([10, 50, 75, 100])\n", @@ -760,6 +899,7 @@ "# 5. tropical cyclone return period maps for the threshold intensities [30, 40]\n", "return_periods, label, column_label = haz_tc_fl.local_return_period([30, 40])\n", "from climada.util.plot import plot_from_gdf\n", + "\n", "plot_from_gdf(return_periods, colorbar_name=label, title_subplots=column_label)\n", "\n", "# 6. 
intensities of all the events in centroid with id 50\n", @@ -791,9 +931,9 @@ "import matplotlib.pyplot as plt\n", "\n", "fig, ax1, fontsize = make_map(1) # map\n", - "ax2 = fig.add_subplot(2, 1, 2) # add regular axes\n", - "haz_tc_fl.plot_intensity(axis=ax1, event=0) # plot original resolution\n", - "ax1.plot(-80, 26, 'or', mfc='none', markersize=12)\n", + "ax2 = fig.add_subplot(2, 1, 2) # add regular axes\n", + "haz_tc_fl.plot_intensity(axis=ax1, event=0) # plot original resolution\n", + "ax1.plot(-80, 26, \"or\", mfc=\"none\", markersize=12)\n", "haz_tc_fl.plot_intensity(axis=ax2, centr=(26, -80))\n", "fig.subplots_adjust(hspace=6.5)" ] @@ -830,9 +970,9 @@ ], "source": [ "# If you see an error message, try to create a depository named results in the repository tutorial.\n", - "haz_tc_fl.write_hdf5('results/haz_tc_fl.h5')\n", + "haz_tc_fl.write_hdf5(\"results/haz_tc_fl.h5\")\n", "\n", - "haz = Hazard.from_hdf5('results/haz_tc_fl.h5')\n", + "haz = Hazard.from_hdf5(\"results/haz_tc_fl.h5\")\n", "haz.check()" ] }, @@ -857,7 +997,7 @@ } ], "source": [ - "haz_ven.write_raster('results/haz_ven.tif') # each event is a band of the tif file" + "haz_ven.write_raster(\"results/haz_ven.tif\") # each event is a band of the tif file" ] }, { @@ -882,8 +1022,9 @@ ], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_haz_tc_fl.p', haz_tc_fl)" + "save(\"tutorial_haz_tc_fl.p\", haz_tc_fl)" ] } ], diff --git a/doc/tutorial/climada_hazard_StormEurope.ipynb b/doc/tutorial/climada_hazard_StormEurope.ipynb index 3c0ba68658..7772d60579 100644 --- a/doc/tutorial/climada_hazard_StormEurope.ipynb +++ b/doc/tutorial/climada_hazard_StormEurope.ipynb @@ -21,7 +21,8 @@ "source": [ "%matplotlib inline\n", "import matplotlib.pyplot as plt\n", - "plt.rcParams['figure.figsize'] = [15, 10]" + "\n", + "plt.rcParams[\"figure.figsize\"] = [15, 10]" ] }, { @@ -107,7 +108,7 @@ } ], "source": [ - "storm_instance?" + "?storm_instance" ] }, { @@ -144,12 +145,12 @@ "outputs": [], "source": [ "storm_instance.set_ssi(\n", - " method = 'wind_gust',\n", - " intensity = storm_instance.intensity,\n", + " method=\"wind_gust\",\n", + " intensity=storm_instance.intensity,\n", " # the above is just a more explicit way of passing the default\n", - " on_land = True,\n", - " threshold = 25,\n", - " sel_cen = None\n", + " on_land=True,\n", + " threshold=25,\n", + " sel_cen=None,\n", " # None is default. 
sel_cen could be used to subset centroids\n", ")" ] @@ -244,16 +245,16 @@ "outputs": [], "source": [ "ssi_args = {\n", - " 'on_land': True,\n", - " 'threshold': 25,\n", + " \"on_land\": True,\n", + " \"threshold\": 25,\n", "}\n", "\n", "storm_prob_xtreme = storm_instance.generate_prob_storms(\n", - " reg_id=[56, 528], # BEL and NLD\n", + " reg_id=[56, 528], # BEL and NLD\n", " spatial_shift=2,\n", " ssi_args=ssi_args,\n", " power=1.5,\n", - " scale=0.3\n", + " scale=0.3,\n", ")" ] }, @@ -306,7 +307,7 @@ } ], "source": [ - "storm_prob_xtreme.plot_ssi(full_area=True);\n", + "storm_prob_xtreme.plot_ssi(full_area=True)\n", "storm_prob.plot_ssi(full_area=True);" ] } diff --git a/doc/tutorial/climada_hazard_TropCyclone.ipynb b/doc/tutorial/climada_hazard_TropCyclone.ipynb index 79b63981a0..480d5c0b49 100644 --- a/doc/tutorial/climada_hazard_TropCyclone.ipynb +++ b/doc/tutorial/climada_hazard_TropCyclone.ipynb @@ -142,26 +142,35 @@ "%matplotlib inline\n", "from climada.hazard import TCTracks\n", "\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2017242N16333') # IRMA 2017\n", - "ax = tr_irma.plot();\n", - "ax.set_title('IRMA') # set title\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2017242N16333\"\n", + ") # IRMA 2017\n", + "ax = tr_irma.plot()\n", + "ax.set_title(\"IRMA\") # set title\n", "\n", "# other ibtracs selection options\n", "from climada.hazard import TCTracks\n", + "\n", "# years 1993 and 1994 in basin EP.\n", "# correct_pres ignores tracks with not enough data. For statistics (frequency of events), these should be considered as well\n", - "sel_ibtracs = TCTracks.from_ibtracs_netcdf(provider='usa', year_range=(1993, 1994), basin='EP', correct_pres=False)\n", - "print('Number of tracks:', sel_ibtracs.size)\n", - "ax = sel_ibtracs.plot();\n", - "ax.get_legend()._loc = 2 # correct legend location\n", - "ax.set_title('1993-1994, EP') # set title\n", + "sel_ibtracs = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", year_range=(1993, 1994), basin=\"EP\", correct_pres=False\n", + ")\n", + "print(\"Number of tracks:\", sel_ibtracs.size)\n", + "ax = sel_ibtracs.plot()\n", + "ax.get_legend()._loc = 2 # correct legend location\n", + "ax.set_title(\"1993-1994, EP\") # set title\n", "\n", - "track1 = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2007314N10093') # SIDR 2007\n", - "track2 = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2016138N10081') # ROANU 2016\n", - "track1.append(track2.data) # put both tracks together\n", - "ax = track1.plot();\n", - "ax.get_legend()._loc = 2 # correct legend location\n", - "ax.set_title('SIDR and ROANU'); # set title" + "track1 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2007314N10093\"\n", + ") # SIDR 2007\n", + "track2 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2016138N10081\"\n", + ") # ROANU 2016\n", + "track1.append(track2.data) # put both tracks together\n", + "ax = track1.plot()\n", + "ax.get_legend()._loc = 2 # correct legend location\n", + "ax.set_title(\"SIDR and ROANU\"); # set title" ] }, { @@ -781,7 +790,7 @@ } ], "source": [ - "tr_irma.get_track('2017242N16333')" + "tr_irma.get_track(\"2017242N16333\")" ] }, { @@ -1675,7 +1684,7 @@ } ], "source": [ - "tr_irma.data[-1] # last synthetic track. notice the value of orig_event_flag and name" + "tr_irma.data[-1] # last synthetic track. 
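Before computing wind fields, track records are usually interpolated to a regular time step; a sketch, assuming the `equal_timestep` method with an hourly step:

```python
from climada.hazard import TCTracks

tr = TCTracks.from_ibtracs_netcdf(provider="usa", storm_id="2017242N16333")  # Irma
tr.equal_timestep(time_step_h=1.0)  # hourly interpolation of the track records
print(tr.data[0]["time"].size)
```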
notice the value of orig_event_flag and name" ] }, { @@ -1701,10 +1710,7 @@ }, "outputs": [], "source": [ - "# Put your code here\n", - "\n", - "\n", - "\n" + "# Put your code here" ] }, { @@ -1737,20 +1743,23 @@ "source": [ "# SOLUTION:\n", "import numpy as np\n", + "\n", "# select the track\n", - "tc_syn = tr_irma.get_track('2017242N16333_gen1')\n", + "tc_syn = tr_irma.get_track(\"2017242N16333_gen1\")\n", "\n", "# 1. Which is the time frequency of the data?\n", "# The values of a DataArray are numpy.arrays.\n", "# The nummpy.ediff1d computes the different between elements in an array\n", "diff_time_ns = np.ediff1d(tc_syn[\"time\"])\n", - "diff_time_h = diff_time_ns.astype(int)/1000/1000/1000/60/60\n", - "print('Mean time frequency in hours:', diff_time_h.mean())\n", - "print('Std time frequency in hours:', diff_time_h.std())\n", + "diff_time_h = diff_time_ns.astype(int) / 1000 / 1000 / 1000 / 60 / 60\n", + "print(\"Mean time frequency in hours:\", diff_time_h.mean())\n", + "print(\"Std time frequency in hours:\", diff_time_h.std())\n", "print()\n", "\n", "# 2. Compute the maximum sustained wind for each day.\n", - "print('Daily max sustained wind:', tc_syn[\"max_sustained_wind\"].groupby('time.day').max())" + "print(\n", + " \"Daily max sustained wind:\", tc_syn[\"max_sustained_wind\"].groupby(\"time.day\").max()\n", + ")" ] }, { @@ -1887,15 +1896,16 @@ "min_lat, max_lat, min_lon, max_lon = 16.99375, 21.95625, -72.48125, -61.66875\n", "cent = Centroids.from_pnt_bounds((min_lon, min_lat, max_lon, max_lat), res=0.12)\n", "cent.check()\n", - "cent.plot();\n", + "cent.plot()\n", "\n", "# construct tropical cyclones\n", "tc_irma = TropCyclone.from_tracks(tr_irma, centroids=cent)\n", "# tc_irma = TropCyclone.from_tracks(tr_irma) # try without given centroids. 
It might take too much space of your memory\n", "# and then the kernel will be killed: So, don't use this function without given centroids!\n", "tc_irma.check()\n", - "tc_irma.plot_intensity('2017242N16333'); # IRMA\n", - "tc_irma.plot_intensity('2017242N16333_gen2'); # IRMA's synthetic track 2" + "tc_irma.plot_intensity(\"2017242N16333\")\n", + "# IRMA\n", + "tc_irma.plot_intensity(\"2017242N16333_gen2\"); # IRMA's synthetic track 2" ] }, { @@ -1944,13 +1954,18 @@ "source": [ "# an Irma event-like in 2055 under RCP 4.5:\n", "tc_irma = TropCyclone.from_tracks(tr_irma, centroids=cent)\n", - "tc_irma_cc = tc_irma.apply_climate_scenario_knu(target_year=2055, scenario='4.5')\n", + "tc_irma_cc = tc_irma.apply_climate_scenario_knu(target_year=2055, scenario=\"4.5\")\n", "\n", "rel_freq_incr = np.round(\n", - " (np.mean(tc_irma_cc.frequency) - np.mean(tc_irma.frequency)\n", - " ) / np.mean(tc_irma.frequency)*100, 0)\n", + " (np.mean(tc_irma_cc.frequency) - np.mean(tc_irma.frequency))\n", + " / np.mean(tc_irma.frequency)\n", + " * 100,\n", + " 0,\n", + ")\n", "\n", - "print(f\"\\nA TC like Irma would undergo a frequency increase of about {rel_freq_incr} % in 2055 under RCP 45\")" + "print(\n", + " f\"\\nA TC like Irma would undergo a frequency increase of about {rel_freq_incr} % in 2055 under RCP 45\"\n", + ")" ] }, { @@ -2067,9 +2082,9 @@ "\n", "from climada.hazard import Centroids, TropCyclone, TCTracks\n", "\n", - "track_name = '2017242N16333' #'2016273N13300' #'1992230N11325'\n", + "track_name = \"2017242N16333\" #'2016273N13300' #'1992230N11325'\n", "\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2017242N16333')\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(provider=\"usa\", storm_id=\"2017242N16333\")\n", "\n", "lon_min, lat_min, lon_max, lat_max = -83.5, 24.4, -79.8, 29.6\n", "centr_video = Centroids.from_pnt_bounds((lon_min, lat_min, lon_max, lat_max), 0.04)\n", @@ -2077,7 +2092,9 @@ "\n", "tc_video = TropCyclone()\n", "\n", - "tc_list, tr_coord = tc_video.video_intensity(track_name, tr_irma, centr_video, file_name='results/irma_tc_fl.gif')" + "tc_list, tr_coord = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video, file_name=\"results/irma_tc_fl.gif\"\n", + ")" ] }, { @@ -2156,9 +2173,11 @@ "from matplotlib import animation\n", "from matplotlib.pyplot import rcParams\n", "\n", - "rcParams['animation.ffmpeg_path'] = shutil.which('ffmpeg')\n", + "rcParams[\"animation.ffmpeg_path\"] = shutil.which(\"ffmpeg\")\n", "writer = animation.FFMpegWriter(bitrate=500)\n", - "tc_list, tr_coord = tc_video.video_intensity(track_name, tr_irma, centr_video, file_name='results/irma_tc_fl.mp4', writer=writer)" + "tc_list, tr_coord = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video, file_name=\"results/irma_tc_fl.mp4\", writer=writer\n", + ")" ] }, { diff --git a/doc/tutorial/climada_util_api_client.ipynb b/doc/tutorial/climada_util_api_client.ipynb index 580e0b08da..215f8b6d0f 100644 --- a/doc/tutorial/climada_util_api_client.ipynb +++ b/doc/tutorial/climada_util_api_client.ipynb @@ -28,6 +28,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -150,10 +151,11 @@ ], "source": [ "import pandas as pd\n", + "\n", "data_types = client.list_data_type_infos()\n", "\n", "dtf = pd.DataFrame(data_types)\n", - "dtf.sort_values(['data_type_group', 'data_type'])" + "dtf.sort_values([\"data_type_group\", \"data_type\"])" ] }, { @@ -170,7 +172,7 @@ "metadata": {}, "outputs": [], "source": [ - 
"litpop_dataset_infos = client.list_dataset_infos(data_type='litpop')" + "litpop_dataset_infos = client.list_dataset_infos(data_type=\"litpop\")" ] }, { @@ -233,7 +235,9 @@ "source": [ "# as datasets are usually available per country, chosing a country or global dataset reduces the options\n", "# here we want to see which datasets are available for litpop globally:\n", - "client.get_property_values(litpop_dataset_infos, known_property_values = {'spatial_coverage':'global'})" + "client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"spatial_coverage\": \"global\"}\n", + ")" ] }, { @@ -259,8 +263,10 @@ } ], "source": [ - "#and here for Switzerland:\n", - "client.get_property_values(litpop_dataset_infos, known_property_values = {'country_name':'Switzerland'})" + "# and here for Switzerland:\n", + "client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"country_name\": \"Switzerland\"}\n", + ")" ] }, { @@ -313,8 +319,10 @@ } ], "source": [ - "tc_dataset_infos = client.list_dataset_infos(data_type='tropical_cyclone')\n", - "client.get_property_values(tc_dataset_infos, known_property_values = {'country_name':'Haiti'})" + "tc_dataset_infos = client.list_dataset_infos(data_type=\"tropical_cyclone\")\n", + "client.get_property_values(\n", + " tc_dataset_infos, known_property_values={\"country_name\": \"Haiti\"}\n", + ")" ] }, { @@ -347,7 +355,15 @@ ], "source": [ "client = Client()\n", - "tc_haiti = client.get_hazard('tropical_cyclone', properties={'country_name': 'Haiti', 'climate_scenario': 'rcp45', 'ref_year':'2040', 'nb_synth_tracks':'10'})\n", + "tc_haiti = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"rcp45\",\n", + " \"ref_year\": \"2040\",\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")\n", "tc_haiti.plot_intensity(0);" ] }, @@ -365,7 +381,9 @@ "metadata": {}, "outputs": [], "source": [ - "litpop_default = client.get_property_values(litpop_dataset_infos, known_property_values = {'fin_mode':'pc', 'exponents':'(1,1)'})" + "litpop_default = client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"fin_mode\": \"pc\", \"exponents\": \"(1,1)\"}\n", + ")" ] }, { @@ -385,7 +403,7 @@ } ], "source": [ - "litpop = client.get_litpop(country='Haiti')" + "litpop = client.get_litpop(country=\"Haiti\")" ] }, { @@ -446,6 +464,7 @@ "outputs": [], "source": [ "from climada.engine import ImpactCalc\n", + "\n", "impact = ImpactCalc(litpop, imp_fun_set, tc_haiti).impact()" ] }, @@ -476,7 +495,7 @@ } ], "source": [ - "crop_dataset_infos = client.list_dataset_infos(data_type='crop_production')\n", + "crop_dataset_infos = client.list_dataset_infos(data_type=\"crop_production\")\n", "\n", "client.get_property_values(crop_dataset_infos)" ] @@ -487,7 +506,10 @@ "metadata": {}, "outputs": [], "source": [ - "rice_exposure = client.get_exposures(exposures_type='crop_production', properties = {'crop':'ric', 'unit': 'USD','irrigation_status': 'noirr'})" + "rice_exposure = client.get_exposures(\n", + " exposures_type=\"crop_production\",\n", + " properties={\"crop\": \"ric\", \"unit\": \"USD\", \"irrigation_status\": \"noirr\"},\n", + ")" ] }, { @@ -584,7 +606,7 @@ } ], "source": [ - "centroids_nopoles = client.get_centroids(extent=[-180,180,-60,50])\n", + "centroids_nopoles = client.get_centroids(extent=[-180, 180, -60, 50])\n", "centroids_nopoles.plot()" ] }, @@ -612,7 +634,7 @@ } ], "source": [ - "centroids_hti = 
client.get_centroids(country='HTI')" + "centroids_hti = client.get_centroids(country=\"HTI\")" ] }, { @@ -667,7 +689,7 @@ } ], "source": [ - "Client?" + "?Client" ] }, { @@ -741,7 +763,7 @@ } ], "source": [ - "client.get_dataset_info_by_uuid('b1c76120-4e60-4d8f-99c0-7e1e7b7860ec')" + "client.get_dataset_info_by_uuid(\"b1c76120-4e60-4d8f-99c0-7e1e7b7860ec\")" ] }, { @@ -810,7 +832,8 @@ ], "source": [ "from climada.util.api_client import DatasetInfo\n", - "DatasetInfo?" + "\n", + "?DatasetInfo" ] }, { @@ -849,7 +872,8 @@ ], "source": [ "from climada.util.api_client import FileInfo\n", - "FileInfo?" + "\n", + "?FileInfo" ] }, { @@ -890,7 +914,7 @@ } ], "source": [ - "client.into_datasets_df?" + "?client.into_datasets_df" ] }, { @@ -1059,8 +1083,12 @@ ], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()\n", - "litpop_datasets = client.list_dataset_infos(data_type='litpop', properties={'country_name': 'South Georgia and the South Sandwich Islands'})\n", + "litpop_datasets = client.list_dataset_infos(\n", + " data_type=\"litpop\",\n", + " properties={\"country_name\": \"South Georgia and the South Sandwich Islands\"},\n", + ")\n", "litpop_df = client.into_datasets_df(litpop_datasets)\n", "litpop_df" ] @@ -1127,7 +1155,7 @@ } ], "source": [ - "client.download_dataset?" + "?client.download_dataset" ] }, { @@ -1161,7 +1189,9 @@ ], "source": [ "# Let's have a look at an example for downloading a litpop dataset first\n", - "ds = litpop_datasets[0] # litpop_datasets is a list and download_dataset expects a single object as argument.\n", + "ds = litpop_datasets[\n", + " 0\n", + "] # litpop_datasets is a list and download_dataset expects a single object as argument.\n", "download_dir, ds_files = client.download_dataset(ds)\n", "ds_files[0], ds_files[0].is_file()" ] @@ -1214,9 +1244,14 @@ ], "source": [ "from climada.util.api_client import Client\n", + "\n", "Client().get_dataset_file(\n", - " data_type='litpop',\n", - " properties={'country_name': 'South Georgia and the South Sandwich Islands', 'fin_mode': 'pop'})" + " data_type=\"litpop\",\n", + " properties={\n", + " \"country_name\": \"South Georgia and the South Sandwich Islands\",\n", + " \"fin_mode\": \"pop\",\n", + " },\n", + ")" ] }, { diff --git a/doc/tutorial/climada_util_earth_engine.ipynb b/doc/tutorial/climada_util_earth_engine.ipynb index d6ca785cec..10811ce4d7 100644 --- a/doc/tutorial/climada_util_earth_engine.ipynb +++ b/doc/tutorial/climada_util_earth_engine.ipynb @@ -53,8 +53,9 @@ "import webbrowser\n", "\n", "import ee\n", + "\n", "ee.Initialize()\n", - "image = ee.Image('srtm90_v4')\n", + "image = ee.Image(\"srtm90_v4\")\n", "print(image.getInfo())" ] }, @@ -75,10 +76,11 @@ "outputs": [], "source": [ "# Access a specific image\n", - "image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318'); #Landsat 8 image, with Top of Atmosphere processing, on 2014/03/18 \n", + "image = ee.Image(\"LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318\")\n", + "# Landsat 8 image, with Top of Atmosphere processing, on 2014/03/18\n", "\n", "# Access a collection\n", - "collection = 'LANDSAT/LE07/C01/T1' #Landsat 7 raw images collection" + "collection = \"LANDSAT/LE07/C01/T1\" # Landsat 7 raw images collection" ] }, { @@ -109,32 +111,38 @@ } ], "source": [ - "#Landsat_composite in Dresden area\n", - "area_dresden = list([(13.6, 50.96), (13.9, 50.96), (13.9, 51.12), (13.6, 51.12), (13.6, 50.96)])\n", - "area_dresden = ee.Geometry.Polygon(area_dresden) \n", - "time_range_dresden = ['2002-07-28', '2002-08-05']\n", - 
"\n", - "collection_dresden = ('LANDSAT/LE07/C01/T1')\n", + "# Landsat_composite in Dresden area\n", + "area_dresden = list(\n", + " [(13.6, 50.96), (13.9, 50.96), (13.9, 51.12), (13.6, 51.12), (13.6, 50.96)]\n", + ")\n", + "area_dresden = ee.Geometry.Polygon(area_dresden)\n", + "time_range_dresden = [\"2002-07-28\", \"2002-08-05\"]\n", + "\n", + "collection_dresden = \"LANDSAT/LE07/C01/T1\"\n", "print(type(area_dresden))\n", "\n", - "#Population density in Switzerland\n", - "list_swiss = list([(6.72, 47.88),(6.72, 46.55),(9.72, 46.55),(9.72, 47.88),(6.72, 47.88)]) \n", - "area_swiss = ee.Geometry.Polygon(list_swiss) \n", - "time_range_swiss=['2002-01-01', '2005-12-30']\n", + "# Population density in Switzerland\n", + "list_swiss = list(\n", + " [(6.72, 47.88), (6.72, 46.55), (9.72, 46.55), (9.72, 47.88), (6.72, 47.88)]\n", + ")\n", + "area_swiss = ee.Geometry.Polygon(list_swiss)\n", + "time_range_swiss = [\"2002-01-01\", \"2005-12-30\"]\n", "\n", - "collection_swiss = ee.ImageCollection('CIESIN/GPWv4/population-density')\n", + "collection_swiss = ee.ImageCollection(\"CIESIN/GPWv4/population-density\")\n", "print(type(collection_swiss))\n", "\n", - "#Sentinel 2 cloud-free image in Zürich\n", - "collection_zurich = ('COPERNICUS/S2')\n", - "list_zurich = list([(8.53, 47.355),(8.55, 47.355),(8.55, 47.376),(8.53, 47.376),(8.53, 47.355)]) \n", - "area_zurich = ee.Geometry.Polygon(list_swiss) \n", - "time_range_zurich = ['2018-05-01', '2018-07-30']\n", + "# Sentinel 2 cloud-free image in Zürich\n", + "collection_zurich = \"COPERNICUS/S2\"\n", + "list_zurich = list(\n", + " [(8.53, 47.355), (8.55, 47.355), (8.55, 47.376), (8.53, 47.376), (8.53, 47.355)]\n", + ")\n", + "area_zurich = ee.Geometry.Polygon(list_swiss)\n", + "time_range_zurich = [\"2018-05-01\", \"2018-07-30\"]\n", "\n", "\n", - "#Landcover in Europe with CORINE dataset\n", - "dataset_landcover = ee.Image('COPERNICUS/CORINE/V18_5_1/100m/2012')\n", - "landCover_layer = dataset_landcover.select('landcover')\n", + "# Landcover in Europe with CORINE dataset\n", + "dataset_landcover = ee.Image(\"COPERNICUS/CORINE/V18_5_1/100m/2012\")\n", + "landCover_layer = dataset_landcover.select(\"landcover\")\n", "print(type(landCover_layer))" ] }, @@ -144,9 +152,9 @@ "metadata": {}, "outputs": [], "source": [ - "#Methods from climada.util.earth_engine module\n", + "# Methods from climada.util.earth_engine module\n", "def obtain_image_landsat_composite(collection, time_range, area):\n", - " \"\"\" Selection of Landsat cloud-free composites in the Earth Engine library\n", + " \"\"\"Selection of Landsat cloud-free composites in the Earth Engine library\n", " See also: https://developers.google.com/earth-engine/landsat\n", "\n", " Parameters:\n", @@ -156,7 +164,7 @@ "\n", " Returns:\n", " image_composite (ee.image.Image)\n", - " \"\"\"\n", + " \"\"\"\n", " collection = ee.ImageCollection(collection)\n", "\n", " ## Filter by time range and location\n", @@ -165,8 +173,9 @@ " image_composite = ee.Algorithms.Landsat.simpleComposite(image_area, 75, 3)\n", " return image_composite\n", "\n", + "\n", "def obtain_image_median(collection, time_range, area):\n", - " \"\"\" Selection of median from a collection of images in the Earth Engine library\n", + " \"\"\"Selection of median from a collection of images in the Earth Engine library\n", " See also: https://developers.google.com/earth-engine/reducers_image_collection\n", "\n", " Parameters:\n", @@ -176,7 +185,7 @@ "\n", " Returns:\n", " image_median (ee.image.Image)\n", - " \"\"\"\n", + " \"\"\"\n", " collection 
= ee.ImageCollection(collection)\n", "\n", " ## Filter by time range and location\n", @@ -185,8 +194,9 @@ " image_median = image_area.median()\n", " return image_median\n", "\n", + "\n", "def obtain_image_sentinel(collection, time_range, area):\n", - " \"\"\" Selection of median, cloud-free image from a collection of images in the Sentinel 2 dataset\n", + " \"\"\"Selection of median, cloud-free image from a collection of images in the Sentinel 2 dataset\n", " See also: https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2\n", "\n", " Parameters:\n", @@ -196,24 +206,28 @@ "\n", " Returns:\n", " sentinel_median (ee.image.Image)\n", - " \"\"\"\n", - "#First, method to remove cloud from the image\n", + " \"\"\"\n", + "\n", + " # First, method to remove cloud from the image\n", " def maskclouds(image):\n", - " band_qa = image.select('QA60')\n", + " band_qa = image.select(\"QA60\")\n", " cloud_mask = ee.Number(2).pow(10).int()\n", " cirrus_mask = ee.Number(2).pow(11).int()\n", - " mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and(\n", - " band_qa.bitwiseAnd(cirrus_mask).eq(0))\n", + " mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and (\n", + " band_qa.bitwiseAnd(cirrus_mask).eq(0)\n", + " )\n", " return image.updateMask(mask).divide(10000)\n", "\n", - " sentinel_filtered = (ee.ImageCollection(collection).\n", - " filterBounds(area).\n", - " filterDate(time_range[0], time_range[1]).\n", - " filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20)).\n", - " map(maskclouds))\n", + " sentinel_filtered = (\n", + " ee.ImageCollection(collection)\n", + " .filterBounds(area)\n", + " .filterDate(time_range[0], time_range[1])\n", + " .filter(ee.Filter.lt(\"CLOUDY_PIXEL_PERCENTAGE\", 20))\n", + " .map(maskclouds)\n", + " )\n", "\n", " sentinel_median = sentinel_filtered.median()\n", - " return sentinel_median\n" + " return sentinel_median" ] }, { @@ -232,13 +246,15 @@ } ], "source": [ - "#Application to examples\n", - "composite_dresden = obtain_image_landsat_composite(collection_dresden, time_range_dresden, area_dresden)\n", + "# Application to examples\n", + "composite_dresden = obtain_image_landsat_composite(\n", + " collection_dresden, time_range_dresden, area_dresden\n", + ")\n", "median_swiss = obtain_image_median(collection_swiss, time_range_swiss, area_swiss)\n", "zurich_median = obtain_image_sentinel(collection_zurich, time_range_zurich, area_zurich)\n", "\n", - "#Selection of specific bands from an image\n", - "zurich_band = zurich_median.select(['B4','B3','B2']) \n", + "# Selection of specific bands from an image\n", + "zurich_band = zurich_median.select([\"B4\", \"B3\", \"B2\"])\n", "\n", "\n", "print(composite_dresden.getInfo())\n", @@ -279,7 +295,7 @@ "\n", "region_dresden = get_region(area_dresden)\n", "region_swiss = get_region(area_swiss)\n", - "region_zurich= get_region(area_zurich)" + "region_zurich = get_region(area_zurich)" ] }, { @@ -321,24 +337,19 @@ "\n", " Returns:\n", " path (str)\n", - " \"\"\"\n", - " path = image.getDownloadURL({\n", - " 'name':(name),\n", - " 'scale': scale,\n", - " 'region':(region)\n", - " })\n", + " \"\"\"\n", + " path = image.getDownloadURL({\"name\": (name), \"scale\": scale, \"region\": (region)})\n", "\n", " webbrowser.open_new_tab(path)\n", " return path\n", "\n", - " \n", - " \n", - "url_swiss = get_url('swiss_pop', median_swiss, 900, region_swiss)\n", - "url_dresden = get_url('dresden', composite_dresden, 30, region_dresden)\n", - "url_landcover = get_url('landcover_swiss', landCover_layer, 100, region_swiss)\n", "\n", - "#For the example 
of Zürich, due to size, it doesn't work on Jupyter Notebook but it works on Python\n", - "#url_zurich = get_url('sentinel', zurich_band, 10, region_zurich)\n", + "url_swiss = get_url(\"swiss_pop\", median_swiss, 900, region_swiss)\n", + "url_dresden = get_url(\"dresden\", composite_dresden, 30, region_dresden)\n", + "url_landcover = get_url(\"landcover_swiss\", landCover_layer, 100, region_swiss)\n", + "\n", + "# For the example of Zürich, due to size, it doesn't work on Jupyter Notebook but it works on Python\n", + "# url_zurich = get_url('sentinel', zurich_band, 10, region_zurich)\n", "\n", "print(url_swiss)\n", "print(url_dresden)\n", @@ -387,7 +398,7 @@ "from skimage.filters import try_all_threshold\n", "from skimage.filters import threshold_otsu, threshold_local\n", "from skimage import measure\n", - "from skimage import feature\n" + "from skimage import feature" ] }, { @@ -398,8 +409,8 @@ "source": [ "from climada.util import DEMO_DIR\n", "\n", - "swiss_pop = DEMO_DIR.joinpath('earth_engine', 'population-density_median.tif')\n", - "dresden = DEMO_DIR.joinpath('earth_engine', 'dresden.tif') #B4 of Dresden example\n" + "swiss_pop = DEMO_DIR.joinpath(\"earth_engine\", \"population-density_median.tif\")\n", + "dresden = DEMO_DIR.joinpath(\"earth_engine\", \"dresden.tif\") # B4 of Dresden example" ] }, { @@ -433,19 +444,19 @@ } ], "source": [ - "#Read a tif in python and Visualize the image\n", + "# Read a tif in python and Visualize the image\n", "image_dresden = imread(dresden)\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(image_dresden, cmap='gray', interpolation='nearest')\n", + "plt.imshow(image_dresden, cmap=\"gray\", interpolation=\"nearest\")\n", "plt.axis()\n", "plt.show()\n", "\n", - "#Crop the image\n", - "image_dresden_crop=image_dresden[300:700,600:1400]\n", + "# Crop the image\n", + "image_dresden_crop = image_dresden[300:700, 600:1400]\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(image_dresden_crop, cmap='gray', interpolation='nearest')\n", + "plt.imshow(image_dresden_crop, cmap=\"gray\", interpolation=\"nearest\")\n", "plt.axis()\n", - "plt.show()\n" + "plt.show()" ] }, { @@ -467,12 +478,12 @@ } ], "source": [ - "image_pop= imread(swiss_pop)\n", + "image_pop = imread(swiss_pop)\n", "plt.figure(figsize=(12, 12))\n", - "plt.imshow(image_pop, cmap='Reds', interpolation='nearest')\n", + "plt.imshow(image_pop, cmap=\"Reds\", interpolation=\"nearest\")\n", "plt.colorbar()\n", "plt.axis()\n", - "plt.show()\n" + "plt.show()" ] }, { @@ -501,7 +512,7 @@ } ], "source": [ - "#Thresholding: Selection of pixels with regards with their value\n", + "# Thresholding: Selection of pixels with regards with their value\n", "\n", "global_thresh = threshold_otsu(image_dresden_crop)\n", "binary_global = image_dresden_crop > global_thresh\n", @@ -515,19 +526,19 @@ "plt.gray()\n", "\n", "ax[0].imshow(image_dresden_crop)\n", - "ax[0].set_title('Original')\n", + "ax[0].set_title(\"Original\")\n", "\n", "ax[1].imshow(binary_global)\n", - "ax[1].set_title('Global thresholding')\n", + "ax[1].set_title(\"Global thresholding\")\n", "\n", "ax[2].imshow(binary_adaptive)\n", - "ax[2].set_title('Adaptive thresholding')\n", + "ax[2].set_title(\"Adaptive thresholding\")\n", "\n", "for a in ax:\n", - " a.axis('off')\n", + " a.axis(\"off\")\n", "plt.show()\n", "\n", - "print(np.sum(binary_global))\n" + "print(np.sum(binary_global))" ] } ], diff --git a/doc/tutorial/climada_util_yearsets.ipynb b/doc/tutorial/climada_util_yearsets.ipynb index 747d29fcf2..9ead010193 100644 --- 
a/doc/tutorial/climada_util_yearsets.ipynb
+++ b/doc/tutorial/climada_util_yearsets.ipynb
@@ -40,11 +40,11 @@
 "import climada.util.yearsets as yearsets\n",
 "from climada.engine import Impact\n",
 "\n",
- "# dummy event_impacts object containing 10 event_impacts with the values 10-110 \n",
+ "# dummy event_impacts object containing 10 event_impacts with the values 10-110\n",
 "# and the frequency 0.2 (Return period of 5 years)\n",
 "imp = Impact()\n",
- "imp.at_event = np.arange(10,110,10)\n",
- "imp.frequency = np.array(np.ones(10)*0.2)\n",
+ "imp.at_event = np.arange(10, 110, 10)\n",
+ "imp.frequency = np.array(np.ones(10) * 0.2)\n",
 "\n",
 "# the number of years to sample impacts for (length(yimp.at_event) = sampled_years)\n",
 "sampled_years = 10\n",
@@ -147,11 +147,13 @@
 ],
 "source": [
 "# compare the resulting yimp with our step-by-step computation without applying the correction factor:\n",
- "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1,11)), correction_fac=False)\n",
+ "yimp, sampling_vect = yearsets.impact_yearset(\n",
+ "    imp, sampled_years=list(range(1, 11)), correction_fac=False\n",
+ ")\n",
 "\n",
- "print('The yimp.at_event values equal our step-by-step computed imp_per_year:')\n",
- "print('yimp.at_event = ', yimp.at_event)\n",
- "print('imp_per_year = ', imp_per_year)"
+ "print(\"The yimp.at_event values equal our step-by-step computed imp_per_year:\")\n",
+ "print(\"yimp.at_event = \", yimp.at_event)\n",
+ "print(\"imp_per_year = \", imp_per_year)"
 ]
 },
 {
@@ -173,12 +175,14 @@
 ],
 "source": [
 "# and here the same comparison with applying the correction factor (default settings):\n",
- "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1,11)))\n",
+ "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1, 11)))\n",
 "\n",
- "print('The same can be shown for the case of applying the correction factor.' \n",
- "      'The yimp.at_event values equal our step-by-step computed imp_per year:')\n",
- "print('yimp.at_event = ', yimp.at_event)\n",
- "print('imp_per_year = ', imp_per_year/correction_factor)"
+ "print(\n",
+ "    \"The same can be shown for the case of applying the correction factor. \"\n",
+ "    \"The yimp.at_event values equal our step-by-step computed imp_per_year:\"\n",
+ ")\n",
+ "print(\"yimp.at_event = \", yimp.at_event)\n",
+ "print(\"imp_per_year = \", imp_per_year / correction_factor)"
 ]
 }
],
diff --git a/script/applications/eca_san_salvador/README.txt b/script/applications/eca_san_salvador/README.txt
index e81b3188ee..7b3fa3df35 100644
--- a/script/applications/eca_san_salvador/README.txt
+++ b/script/applications/eca_san_salvador/README.txt
@@ -2,4 +2,4 @@ These notebooks show how to use CLIMADA in local case studies.
 The data shown was generated for the Economics of Climate Adaptation study developed with KfW in San Salvador, El Salvador. These represent only a partial outcome of the project.
 Execute first San_Salvador_Risk.ipynb and then San_Salvador_Adaptation.ipynb.
-Contact Gabriela Aznar Siguan for any questions.
+Contact Gabriela Aznar Siguan for any questions. 
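For reviewers who want to sanity-check the reformatted yearsets cells without opening the notebook, the hunks above condense to the following minimal sketch. It is not part of the patch itself: it uses only the calls that appear verbatim in the hunks (a dummy `Impact` with `at_event`/`frequency`, and `yearsets.impact_yearset`), and it assumes a working CLIMADA installation.

```python
# Minimal sketch of the yearset workflow from doc/tutorial/climada_util_yearsets.ipynb,
# assuming CLIMADA is installed; data values mirror the tutorial's dummy impact object.
import numpy as np

import climada.util.yearsets as yearsets
from climada.engine import Impact

# dummy impact set: 10 events with impacts 10..100 and frequency 0.2 (5-year return period)
imp = Impact()
imp.at_event = np.arange(10, 110, 10)
imp.frequency = np.ones(10) * 0.2

# resample the event impacts into a 10-year series of annual impacts,
# here without the correction factor (as in the first comparison cell above)
yimp, sampling_vect = yearsets.impact_yearset(
    imp, sampled_years=list(range(1, 11)), correction_fac=False
)
print("sampled annual impacts:", yimp.at_event)
```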
diff --git a/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb b/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb index 21fb05cdbf..5a50f09d50 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb @@ -128,18 +128,20 @@ "import contextily as ctx\n", "from climada.entity import Entity\n", "\n", - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fijamos el año de referencia\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fijamos el año de referencia\n", "ent_2015.check()\n", "\n", "# Exposures (bienes): los utilizados en el script San Salvador Risk\n", - "print('Total value in 2015: {:.3e}'.format(ent_2015.exposures.gdf.value.sum()))\n", - "ax = ent_2015.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2015')\n", + "print(\"Total value in 2015: {:.3e}\".format(ent_2015.exposures.gdf.value.sum()))\n", + "ax = ent_2015.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2015\")\n", "\n", "# Impact Functions (funciones de impacto): los utilizados en el script San Salvador Risk\n", - "ent_2015.impact_funcs.get_func('FL', 101).plot()\n", - "ent_2015.impact_funcs.get_func('FL', 102).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 101).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 102).plot()\n", "\n", "# Discount rate (tasas de descuento): 2% anual hasta 2040\n", "ent_2015.disc_rates.plot();\n", @@ -230,12 +232,16 @@ "# Exposures (bienes): crecimiento anual del 2%\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", - "print('Valor total en 2040: {:.3e}'.format(ent_2040.exposures.gdf.value.sum()))\n", - "ax = ent_2040.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2040');" + "print(\"Valor total en 2040: {:.3e}\".format(ent_2040.exposures.gdf.value.sum()))\n", + "ax = ent_2040.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2040\");" ] }, { @@ -286,11 +292,13 @@ "import matplotlib.patches as patches\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", "ax = haz_2015.plot_intensity(0) # intensidad máxima alcanzada en cada punto\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, 
linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -348,11 +356,13 @@ "# inundaciones en 2040 bajo un fuerte cambio climático\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", "\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", "ax = haz_2040.plot_intensity(0) # intensidad máxima alcanzada en cada punto\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -410,7 +420,7 @@ "\n", "cb_acel = CostBenefit()\n", "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040)\n", - "ax.set_title('Expected Annual Impact');" + "ax.set_title(\"Expected Annual Impact\");" ] }, { @@ -460,8 +470,10 @@ "from climada.engine import risk_rp_100\n", "\n", "cb_acel = CostBenefit()\n", - "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100)\n", - "ax.set_title('Impact Exceedance at 100 years Return Period');" + "ax = cb_acel.plot_waterfall(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100\n", + ")\n", + "ax.set_title(\"Impact Exceedance at 100 years Return Period\");" ] }, { @@ -524,22 +536,25 @@ ], "source": [ "# impacto de la medida en 2015: No descargas en Lluvia\n", - "meas = ent_2015.measures.get_measure('FL', 'No descargas en Lluvia')\n", - "print('Measure cost {:.3e} USD'.format(meas.cost))\n", - "meas_exp_2015, meas_impf_2015, meas_haz_2015 = meas.apply(ent_2015.exposures, ent_2015.impact_funcs, haz_2015)\n", + "meas = ent_2015.measures.get_measure(\"FL\", \"No descargas en Lluvia\")\n", + "print(\"Measure cost {:.3e} USD\".format(meas.cost))\n", + "meas_exp_2015, meas_impf_2015, meas_haz_2015 = meas.apply(\n", + " ent_2015.exposures, ent_2015.impact_funcs, haz_2015\n", + ")\n", "\n", "# los bienes no cambian\n", "\n", "# las funciones de impacto mejoran ligeramente:\n", - "ax = meas_impf_2015.get_func('FL', 101).plot()\n", - "ax.set_title('Flooding AUP House with measure')\n", + "ax = meas_impf_2015.get_func(\"FL\", 101).plot()\n", + "ax.set_title(\"Flooding AUP House with measure\")\n", "\n", "# inundación reducida hasta 4.8 metros en los eventos más graves:\n", "import numpy as np\n", + "\n", "haz_diff = copy.deepcopy(haz_2015)\n", - "haz_diff.intensity = (haz_2015.intensity - meas_haz_2015.intensity)\n", - "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", - "ax.set_title('Maximumm reduced intensity with measure');" + "haz_diff.intensity = haz_2015.intensity - meas_haz_2015.intensity\n", + "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", + "ax.set_title(\"Maximumm reduced intensity with measure\");" ] }, { @@ -569,7 +584,7 @@ ], "source": [ "# nombre de cada medida considerada\n", - "for meas in ent_2040.measures.get_measure('FL'): # measures related to flood (FL)\n", + "for meas in ent_2040.measures.get_measure(\"FL\"): # measures related to flood (FL)\n", " print(meas.name)" ] }, @@ -668,8 +683,8 @@ ], "source": [ "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # calcula\n", - 
"cb_acel.plot_cost_benefit(); # dibuja el cociente beneficio/costo por medida" + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # calcula\n", + "cb_acel.plot_cost_benefit(); # dibuja el cociente beneficio/costo por medida" ] }, { @@ -718,8 +733,13 @@ ], "source": [ "import matplotlib.colors as colors\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "\n", + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -765,8 +785,14 @@ } ], "source": [ - "cb_comb.apply_risk_transfer('Domestico', attachment=1000, cover=22000000, \n", - " disc_rates=ent_2015.disc_rates, cost_fix=0, cost_factor=1.5)" + "cb_comb.apply_risk_transfer(\n", + " \"Domestico\",\n", + " attachment=1000,\n", + " cover=22000000,\n", + " disc_rates=ent_2015.disc_rates,\n", + " cost_fix=0,\n", + " cost_factor=1.5,\n", + ")" ] }, { @@ -859,7 +885,9 @@ ], "source": [ "ax = cb_acel.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040)\n", - "cb_acel.plot_arrow_averted(ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates) # plot total averted damages" + "cb_acel.plot_arrow_averted(\n", + " ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates\n", + ") # plot total averted damages" ] }, { @@ -893,6 +921,7 @@ "source": [ "# show difference between sublinear, linear and superlinear expected annual damage growth\n", "import functions_ss\n", + "\n", "functions_ss.non_linear_growth(cb_acel)" ] }, @@ -996,10 +1025,14 @@ ], "source": [ "# change growth\n", - "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", + "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", "cb_acel_sub = CostBenefit()\n", - "cb_acel_sub.calc(haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True)\n", - "cb_acel_sub.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact);" + "cb_acel_sub.calc(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True\n", + ")\n", + "cb_acel_sub.plot_waterfall_accumulated(\n", + " haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact\n", + ");" ] }, { diff --git a/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb b/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb index 0701e47598..98388d9913 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb @@ -92,22 +92,25 @@ ], "source": [ "from warnings import simplefilter\n", - "simplefilter(action='ignore')\n", + "\n", + "simplefilter(action=\"ignore\")\n", "import contextily as ctx\n", "from climada.entity import Entity\n", "\n", - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fix reference year\n", "ent_2015.check()\n", "\n", "# Exposures: the ones we used in San Salvador Risk script\n", - "print('Total value in 2015: {:.3e}'.format(ent_2015.exposures.gdf.value.sum()))\n", - "ax = ent_2015.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2015')\n", 
+ "print(\"Total value in 2015: {:.3e}\".format(ent_2015.exposures.gdf.value.sum()))\n", + "ax = ent_2015.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2015\")\n", "\n", "# Impact Functions: the ones we used in San Salvador Risk script\n", - "ent_2015.impact_funcs.get_func('FL', 101).plot()\n", - "ent_2015.impact_funcs.get_func('FL', 102).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 101).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 102).plot()\n", "\n", "# Discount rate: 2% yearly discount year until 2040\n", "ent_2015.disc_rates.plot();\n", @@ -165,12 +168,16 @@ "# Exposures: yearl economic growth of 2% in exposures\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", - "print('Total value in 2040: {:.3e}'.format(ent_2040.exposures.gdf.value.sum()))\n", - "ax = ent_2040.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2040');" + "print(\"Total value in 2040: {:.3e}\".format(ent_2040.exposures.gdf.value.sum()))\n", + "ax = ent_2040.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2040\");" ] }, { @@ -212,11 +219,13 @@ "import matplotlib.patches as patches\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file\n", "ax = haz_2015.plot_intensity(0) # maximum intensity reached at each point\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -265,11 +274,13 @@ "# flood as for 2040 with extreme climate change\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", "\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", "ax = haz_2040.plot_intensity(0) # maximum intensity reached at each point\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -310,7 +321,7 @@ "\n", "cb_acel = CostBenefit()\n", "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040)\n", - "ax.set_title('Expected Annual Impact');" + "ax.set_title(\"Expected Annual Impact\");" ] }, { @@ -345,8 +356,10 @@ 
"from climada.engine import risk_rp_100\n", "\n", "cb_acel = CostBenefit()\n", - "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100)\n", - "ax.set_title('Impact Exceedance at 100 years Return Period');" + "ax = cb_acel.plot_waterfall(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100\n", + ")\n", + "ax.set_title(\"Impact Exceedance at 100 years Return Period\");" ] }, { @@ -400,22 +413,25 @@ ], "source": [ "# Measure impact in 2015: No descargas en Lluvia\n", - "meas = ent_2015.measures.get_measure('FL', 'No descargas en Lluvia')\n", - "print('Measure cost {:.3e} USD'.format(meas.cost))\n", - "meas_exp_2015, meas_impf2015, meas_haz_2015 = meas.apply(ent_2015.exposures, ent_2015.impact_funcs, haz_2015)\n", + "meas = ent_2015.measures.get_measure(\"FL\", \"No descargas en Lluvia\")\n", + "print(\"Measure cost {:.3e} USD\".format(meas.cost))\n", + "meas_exp_2015, meas_impf2015, meas_haz_2015 = meas.apply(\n", + " ent_2015.exposures, ent_2015.impact_funcs, haz_2015\n", + ")\n", "\n", "# exposures stay the same\n", "\n", "# impact functions slightly improved:\n", - "ax = meas_impf2015.get_func('FL', 101).plot()\n", - "ax.set_title('Flooding AUP House with measure')\n", + "ax = meas_impf2015.get_func(\"FL\", 101).plot()\n", + "ax.set_title(\"Flooding AUP House with measure\")\n", "\n", "# flood reduced up to 4.8 meters in worst events:\n", "import numpy as np\n", + "\n", "haz_diff = copy.deepcopy(haz_2015)\n", - "haz_diff.intensity = (haz_2015.intensity - meas_haz_2015.intensity)\n", - "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", - "ax.set_title('Maximumm reduced intensity with measure');" + "haz_diff.intensity = haz_2015.intensity - meas_haz_2015.intensity\n", + "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", + "ax.set_title(\"Maximumm reduced intensity with measure\");" ] }, { @@ -445,7 +461,7 @@ ], "source": [ "# name of every considered measure\n", - "for meas in ent_2040.measures.get_measure('FL'): # measures related to flood (FL)\n", + "for meas in ent_2040.measures.get_measure(\"FL\"): # measures related to flood (FL)\n", " print(meas.name)" ] }, @@ -491,8 +507,8 @@ ], "source": [ "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", - "cb_acel.plot_cost_benefit(); # plot benefit/cost ratio per measure" + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", + "cb_acel.plot_cost_benefit(); # plot benefit/cost ratio per measure" ] }, { @@ -541,8 +557,13 @@ ], "source": [ "import matplotlib.colors as colors\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "\n", + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -588,8 +609,14 @@ } ], "source": [ - "cb_comb.apply_risk_transfer('Domestico', attachment=1000, cover=22000000, \n", - " disc_rates=ent_2015.disc_rates, cost_fix=0, cost_factor=1.5)" + "cb_comb.apply_risk_transfer(\n", + " \"Domestico\",\n", + " attachment=1000,\n", + " cover=22000000,\n", + " disc_rates=ent_2015.disc_rates,\n", + " cost_fix=0,\n", + " cost_factor=1.5,\n", + ")" ] }, { @@ -674,7 +701,9 @@ ], "source": [ "ax = cb_acel.plot_waterfall_accumulated(haz_2015, 
ent_2015, ent_2040)\n", - "cb_acel.plot_arrow_averted(ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates); # plot total averted damages" + "cb_acel.plot_arrow_averted(\n", + " ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates\n", + "); # plot total averted damages" ] }, { @@ -705,6 +734,7 @@ "source": [ "# show difference between sublinear, linear and superlinear expected annual damage growth\n", "import functions_ss\n", + "\n", "functions_ss.non_linear_growth(cb_acel)" ] }, @@ -750,10 +780,14 @@ ], "source": [ "# change growth\n", - "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", + "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", "cb_acel_sub = CostBenefit()\n", - "cb_acel_sub.calc(haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True)\n", - "cb_acel_sub.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact);" + "cb_acel_sub.calc(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True\n", + ")\n", + "cb_acel_sub.plot_waterfall_accumulated(\n", + " haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact\n", + ");" ] }, { diff --git a/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb b/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb index 3cafb8b3c5..360be75115 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb @@ -44,13 +44,13 @@ "import contextily as ctx\n", "from climada.engine import Impact\n", "\n", - "ent_2015_param = Entity.from_excel('FL_entity_Acelhuate_parametric.xlsx')\n", - "ent_2015_param.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015_param = Entity.from_excel(\"FL_entity_Acelhuate_parametric.xlsx\")\n", + "ent_2015_param.exposures.ref_year = 2015 # fix reference year\n", "ent_2015_param.check()\n", "\n", "# flood as for 2015\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file" + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file" ] }, { @@ -97,9 +97,13 @@ ], "source": [ "param_payout = Impact()\n", - "param_payout.calc(ent_2015_param.exposures, ent_2015_param.impact_funcs, haz_2015) # compute parametric payout\n", - "print('Annual expected payout: {:} USD'.format(param_payout.aai_agg)) # get average annual payout\n", - "param_payout.calc_freq_curve().plot() " + "param_payout.calc(\n", + " ent_2015_param.exposures, ent_2015_param.impact_funcs, haz_2015\n", + ") # compute parametric payout\n", + "print(\n", + " \"Annual expected payout: {:} USD\".format(param_payout.aai_agg)\n", + ") # get average annual payout\n", + "param_payout.calc_freq_curve().plot()" ] }, { @@ -163,8 +167,8 @@ } ], "source": [ - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fix reference year\n", "ent_2015.check()\n", "\n", "ent_2040 = copy.deepcopy(ent_2015)\n", @@ -172,19 +176,25 @@ "# Exposures: yearly economic growth of 2% in exposures\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default 
values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", "# flood as for 2040 with extreme climate change\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", "\n", "# expected annual impact\n", "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", "\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -208,10 +218,12 @@ } ], "source": [ - "damage_after_measures=cb_comb.imp_meas_present['Domestico']['impact'].at_event\n", - "paramteric_payout=param_payout.at_event\n", - "residual_damage=np.sum((damage_after_measures-paramteric_payout)*haz_2015.frequency)\n", - "print('residual damage: {:.3e} USD'.format(residual_damage))" + "damage_after_measures = cb_comb.imp_meas_present[\"Domestico\"][\"impact\"].at_event\n", + "paramteric_payout = param_payout.at_event\n", + "residual_damage = np.sum(\n", + " (damage_after_measures - paramteric_payout) * haz_2015.frequency\n", + ")\n", + "print(\"residual damage: {:.3e} USD\".format(residual_damage))" ] } ], diff --git a/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb b/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb index 29da95b789..b73180b385 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb @@ -16,6 +16,7 @@ "%%capture\n", "# generate plots used in this script\n", "import functions_ss\n", + "\n", "fig_ma, fig_point, fig_houses, fig_if = functions_ss.generate_plots_risk()" ] }, @@ -245,7 +246,7 @@ } ], "source": [ - "acc_df = functions_ss.load_accounting() # load accounting.xlsx\n", + "acc_df = functions_ss.load_accounting() # load accounting.xlsx\n", "acc_df.head()" ] }, @@ -424,10 +425,10 @@ "import pandas as pd\n", "from climada.entity import Exposures\n", "\n", - "ENT_FILE = 'FL_entity_Acelhuate_houses.xlsx' # entity file name\n", + "ENT_FILE = \"FL_entity_Acelhuate_houses.xlsx\" # entity file name\n", "\n", "exp_acel = Exposures(pd.read_excel(ENT_FILE))\n", - "exp_acel.check() # check values are well set and assigns default values\n", + "exp_acel.check() # check values are well set and assigns default values\n", "exp_acel.gdf.head() # show first 5 rows" ] }, @@ -459,8 +460,12 @@ ], "source": [ "# some statistics on AUPs and non AUPs\n", - "print('Number of houses, mean and total value of AUP and non AUP: \\n')\n", - "print(exp_acel.gdf[['category', 'value']].groupby('category').agg(['count', 'mean', 'sum']))" + "print(\"Number of houses, mean and total value of AUP and non AUP: \\n\")\n", + "print(\n", + " exp_acel.gdf[[\"category\", \"value\"]]\n", + " .groupby(\"category\")\n", + " .agg([\"count\", \"mean\", \"sum\"])\n", + ")" ] }, { @@ -488,7 +493,7 @@ } ], 
"source": [ - "print(exp_acel.gdf[['category', 'impf_FL']].groupby('category').agg(['unique']))" + "print(exp_acel.gdf[[\"category\", \"impf_FL\"]].groupby(\"category\").agg([\"unique\"]))" ] }, { @@ -551,9 +556,11 @@ "impf_acel = ImpactFuncSet.from_excel(ENT_FILE)\n", "impf_acel.check()\n", "\n", - "print('MDD: mean damage ratio; PAA: percentage of afected assets; MDR = PAA*MDD: mean damage ratio:')\n", - "impf_acel.get_func('FL', 101).plot() # plot flood function 101\n", - "impf_acel.get_func('FL', 102).plot(); # plot flood function 102" + "print(\n", + " \"MDD: mean damage ratio; PAA: percentage of afected assets; MDR = PAA*MDD: mean damage ratio:\"\n", + ")\n", + "impf_acel.get_func(\"FL\", 101).plot() # plot flood function 101\n", + "impf_acel.get_func(\"FL\", 102).plot(); # plot flood function 102" ] }, { @@ -573,9 +580,9 @@ "source": [ "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_acel = Hazard.from_mat(HAZ_FILE) # load file" + "haz_acel = Hazard.from_mat(HAZ_FILE) # load file" ] }, { @@ -680,7 +687,7 @@ "from climada.engine import Impact\n", "\n", "imp_acel = Impact()\n", - "imp_acel.calc(exp_acel, impf_acel, haz_acel) # compute hazard's impact over exposure" + "imp_acel.calc(exp_acel, impf_acel, haz_acel) # compute hazard's impact over exposure" ] }, { @@ -719,8 +726,10 @@ } ], "source": [ - "print('Annual expected impact: {:.3e} USD'.format(imp_acel.aai_agg)) # get average annual impact\n", - "imp_acel.calc_freq_curve().plot(); # plot exceedance frequency curve" + "print(\n", + " \"Annual expected impact: {:.3e} USD\".format(imp_acel.aai_agg)\n", + ") # get average annual impact\n", + "imp_acel.calc_freq_curve().plot(); # plot exceedance frequency curve" ] }, { @@ -748,7 +757,11 @@ "point_lat = exp_acel.gdf.latitude.values[point_idx]\n", "point_lon = exp_acel.gdf.longitude.values[point_idx]\n", "point_eai = imp_acel.eai_exp[point_idx]\n", - "print('Annual expected impact in {:.4f}° N {:.4f}° W is {:.0f} USD.'.format(-point_lat, point_lon, point_eai))" + "print(\n", + " \"Annual expected impact in {:.4f}° N {:.4f}° W is {:.0f} USD.\".format(\n", + " -point_lat, point_lon, point_eai\n", + " )\n", + ")" ] }, { @@ -796,7 +809,10 @@ ], "source": [ "import contextily as ctx\n", - "imp_acel.plot_basemap_eai_exposure(url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot');" + "\n", + "imp_acel.plot_basemap_eai_exposure(\n", + " url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap=\"gnuplot\"\n", + ");" ] }, { @@ -837,8 +853,15 @@ ], "source": [ "import numpy as np\n", - "ax = imp_acel.plot_basemap_eai_exposure(mask=np.argwhere((exp_acel.gdf.category==2).to_numpy()).reshape(-1), url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot')\n", - "ax.set_title('Expected Annual Impact - no AUP');" + "\n", + "ax = imp_acel.plot_basemap_eai_exposure(\n", + " mask=np.argwhere((exp_acel.gdf.category == 2).to_numpy()).reshape(-1),\n", + " url=ctx.providers.OpenStreetMap.Mapnik,\n", + " zoom=15,\n", + " s=2,\n", + " cmap=\"gnuplot\",\n", + ")\n", + "ax.set_title(\"Expected Annual Impact - no AUP\");" ] }, { @@ -878,8 +901,14 @@ } ], "source": [ - "ax = imp_acel.plot_basemap_eai_exposure(mask=np.argwhere((exp_acel.gdf.category==1).to_numpy()).reshape(-1), url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot')\n", - "ax.set_title('Expected Annual Impact - AUP');" + "ax = imp_acel.plot_basemap_eai_exposure(\n", + " 
mask=np.argwhere((exp_acel.gdf.category == 1).to_numpy()).reshape(-1),\n", + " url=ctx.providers.OpenStreetMap.Mapnik,\n", + " zoom=15,\n", + " s=2,\n", + " cmap=\"gnuplot\",\n", + ")\n", + "ax.set_title(\"Expected Annual Impact - AUP\");" ] }, { @@ -906,15 +935,21 @@ } ], "source": [ - "eai_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category==1].index].sum()\n", - "print('Annual expected impact of AUPs: {:.3e} USD.'.format(eai_aup))\n", - "eai_per_aup = eai_aup/exp_acel.gdf[exp_acel.gdf.category==1].value.sum()*100\n", - "print('Annual expected impact of AUPs over its total value: {:.2f}%.'.format(eai_per_aup))\n", + "eai_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category == 1].index].sum()\n", + "print(\"Annual expected impact of AUPs: {:.3e} USD.\".format(eai_aup))\n", + "eai_per_aup = eai_aup / exp_acel.gdf[exp_acel.gdf.category == 1].value.sum() * 100\n", + "print(\n", + " \"Annual expected impact of AUPs over its total value: {:.2f}%.\".format(eai_per_aup)\n", + ")\n", "\n", - "eai_no_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category==2].index].sum()\n", - "print('Annual expected impact of non AUPs: {:.3e} USD.'.format(eai_no_aup))\n", - "eai_per_no_aup = eai_no_aup/exp_acel.gdf[exp_acel.gdf.category==1].value.sum()*100\n", - "print('Annual expected impact of non AUPs over its total value: {:.2f}%.'.format(eai_per_no_aup))" + "eai_no_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category == 2].index].sum()\n", + "print(\"Annual expected impact of non AUPs: {:.3e} USD.\".format(eai_no_aup))\n", + "eai_per_no_aup = eai_no_aup / exp_acel.gdf[exp_acel.gdf.category == 1].value.sum() * 100\n", + "print(\n", + " \"Annual expected impact of non AUPs over its total value: {:.2f}%.\".format(\n", + " eai_per_no_aup\n", + " )\n", + ")" ] } ], diff --git a/script/applications/eca_san_salvador/functions_ss.py b/script/applications/eca_san_salvador/functions_ss.py index caee8a4f5f..3d04785589 100755 --- a/script/applications/eca_san_salvador/functions_ss.py +++ b/script/applications/eca_san_salvador/functions_ss.py @@ -19,47 +19,59 @@ Define WaterScarcity (WS) class. 
WORK IN PROGRESS """ + import contextily as ctx import geopandas as gpd import matplotlib.patches as patches from matplotlib import colormaps as cm from shapely import wkt + def plot_salvador_ma(): - risk_shape = 'POLYGON ((-89.25090785340315 13.671, -89.251 13.671, -89.251 13.67108933717579, -89.251 13.67117867435158, -89.251 13.67126801152738, -89.251 13.67135734870317, -89.251 13.67144668587896, -89.251 13.67153602305475, -89.251 13.67162536023055, -89.251 13.67171469740634, -89.251 13.67180403458213, -89.251 13.67189337175792, -89.251 13.67198270893372, -89.251 13.67207204610951, -89.251 13.6721613832853, -89.251 13.6722507204611, -89.251 13.67234005763689, -89.251 13.67242939481268, -89.251 13.67251873198847, -89.251 13.67260806916426, -89.251 13.67269740634006, -89.251 13.67278674351585, -89.251 13.67287608069164, -89.251 13.67296541786743, -89.251 13.67305475504323, -89.251 13.67314409221902, -89.251 13.67323342939481, -89.251 13.6733227665706, -89.251 13.6734121037464, -89.251 13.67350144092219, -89.251 13.67359077809798, -89.251 13.67368011527378, -89.251 13.67376945244957, -89.251 13.67385878962536, -89.251 13.67394812680115, -89.251 13.67403746397694, -89.251 13.67412680115274, -89.251 13.67421613832853, -89.251 13.67430547550432, -89.251 13.67439481268011, -89.251 13.67448414985591, -89.251 13.6745734870317, -89.251 13.67466282420749, -89.251 13.67475216138329, -89.251 13.67484149855908, -89.251 13.67493083573487, -89.251 13.67502017291066, -89.251 13.67510951008645, -89.251 13.67519884726225, -89.251 13.67528818443804, -89.251 13.67537752161383, -89.251 13.67546685878962, -89.251 13.67555619596542, -89.251 13.67564553314121, -89.251 13.675734870317, -89.251 13.67582420749279, -89.251 13.67591354466859, -89.251 13.67600288184438, -89.251 13.67609221902017, -89.251 13.67618155619597, -89.251 13.67627089337176, -89.251 13.67636023054755, -89.251 13.67644956772334, -89.251 13.67653890489913, -89.251 13.67662824207493, -89.251 13.67671757925072, -89.251 13.67680691642651, -89.251 13.6768962536023, -89.251 13.6769855907781, -89.251 13.67707492795389, -89.251 13.67716426512968, -89.251 13.67725360230548, -89.251 13.67734293948127, -89.251 13.67743227665706, -89.251 13.67752161383285, -89.251 13.67761095100865, -89.251 13.67770028818444, -89.251 13.67778962536023, -89.251 13.67787896253602, -89.251 13.67796829971181, -89.251 13.67805763688761, -89.25090785340315 13.67832564841498, -89.25081570680629 13.67850432276657, -89.25072356020942 13.67868299711816, -89.25063141361257 13.67886167146974, -89.250354973822 13.67921902017291, -89.25017068062827 13.67948703170029, -89.2498942408377 13.67984438040346, -89.24961780104712 13.68020172910663, -89.24934136125655 13.6805590778098, -89.24915706806283 13.68082708933717, -89.24888062827226 13.68118443804035, -89.24860418848168 13.68154178674352, -89.24832774869111 13.68189913544669, -89.24814345549738 13.68216714697406, -89.24786701570682 13.68252449567723, -89.24759057591623 13.6828818443804, -89.24740628272252 13.68314985590778, -89.24712984293194 13.68350720461095, -89.24685340314137 13.68386455331412, -89.24657696335079 13.68422190201729, -89.24639267015708 13.68448991354467, -89.24556335078535 13.68556195965418, -89.24510261780105 13.68609798270893, -89.2450104712042 13.68618731988473, -89.24491832460734 13.68627665706052, -89.24436544502618 13.68690201729107, -89.24427329842932 13.68699135446686, -89.24372041884817 13.68761671469741, -89.24362827225131 13.6877060518732, -89.24353612565446 13.68779538904899, -89.24298324607331 
13.68842074927954, -89.24289109947644 13.68851008645533, -89.24233821989529 13.68913544668588, -89.24224607329843 13.68922478386167, -89.24169319371728 13.68985014409222, -89.24160104712043 13.68993948126801, -89.24150890052357 13.6900288184438, -89.24095602094241 13.69065417867435, -89.24086387434555 13.69074351585014, -89.24077172774869 13.69083285302594, -89.24067958115184 13.69092219020173, -89.24058743455498 13.69101152737752, -89.24049528795813 13.69110086455331, -89.24040314136126 13.69119020172911, -89.2403109947644 13.6912795389049, -89.24021884816754 13.69136887608069, -89.23975811518325 13.69163688760807, -89.23929738219896 13.69190489913545, -89.23865235602095 13.69226224783862, -89.23819162303666 13.69253025936599, -89.23773089005236 13.69279827089337, -89.23708586387436 13.69315561959654, -89.23662513089006 13.69342363112392, -89.23616439790577 13.6936916426513, -89.23570366492147 13.69395965417867, -89.23505863874345 13.69431700288184, -89.23459790575916 13.69458501440922, -89.23413717277488 13.6948530259366, -89.23376858638744 13.69494236311239, -89.23321570680629 13.69503170028818, -89.23266282722513 13.69512103746398, -89.23220209424085 13.69521037463977, -89.23164921465968 13.69529971181556, -89.23109633507853 13.69538904899135, -89.23054345549738 13.69547838616715, -89.23008272251309 13.69556772334294, -89.22952984293194 13.69565706051873, -89.22897696335079 13.69574639769452, -89.22851623036649 13.69583573487032, -89.22741047120419 13.6960144092219, -89.22685759162304 13.69610374639769, -89.22621256544502 13.69619308357349, -89.22612041884817 13.69619308357349, -89.22602827225131 13.69619308357349, -89.22593612565446 13.69619308357349, -89.2258439790576 13.69619308357349, -89.22575183246073 13.69619308357349, -89.22565968586387 13.69619308357349, -89.22556753926702 13.69619308357349, -89.22547539267016 13.69619308357349, -89.22538324607331 13.69619308357349, -89.22529109947644 13.69619308357349, -89.22519895287958 13.69619308357349, -89.22510680628272 13.69619308357349, -89.22501465968587 13.69619308357349, -89.22492251308901 13.69619308357349, -89.22483036649214 13.69619308357349, -89.22473821989529 13.69619308357349, -89.22464607329843 13.69619308357349, -89.22455392670157 13.69619308357349, -89.22446178010472 13.69619308357349, -89.22436963350786 13.69619308357349, -89.22427748691099 13.69619308357349, -89.22418534031414 13.69619308357349, -89.22409319371728 13.69619308357349, -89.22400104712042 13.69619308357349, -89.22390890052357 13.69619308357349, -89.2238167539267 13.69619308357349, -89.22372460732984 13.69619308357349, -89.22363246073299 13.69619308357349, -89.22354031413613 13.69619308357349, -89.22344816753927 13.69619308357349, -89.2233560209424 13.69619308357349, -89.22326387434555 13.69619308357349, -89.22317172774869 13.69619308357349, -89.2222502617801 13.69628242074928, -89.22215811518325 13.69628242074928, -89.22206596858639 13.69628242074928, -89.22197382198954 13.69628242074928, -89.22188167539267 13.69628242074928, -89.22178952879581 13.69628242074928, -89.22169738219895 13.69628242074928, -89.2216052356021 13.69628242074928, -89.22151308900524 13.69628242074928, -89.22142094240837 13.69628242074928, -89.22132879581152 13.69628242074928, -89.22123664921466 13.69628242074928, -89.2211445026178 13.69628242074928, -89.22105235602095 13.69628242074928, -89.22096020942409 13.69628242074928, -89.22086806282722 13.69628242074928, -89.22077591623037 13.69628242074928, -89.22059162303665 13.69619308357349, -89.22031518324607 13.6960144092219, 
-89.21985445026178 13.69574639769452, -89.21957801047121 13.69556772334294, -89.21930157068063 13.69538904899135, -89.21902513089006 13.69521037463977, -89.21874869109948 13.69503170028818, -89.21828795811518 13.69476368876081, -89.21801151832462 13.69458501440922, -89.21773507853403 13.69440634005764, -89.21745863874345 13.69422766570605, -89.21699790575916 13.69395965417867, -89.21672146596859 13.69378097982709, -89.21644502617801 13.6936023054755, -89.21616858638744 13.69342363112392, -89.21589214659686 13.69324495677233, -89.21543141361256 13.69297694524496, -89.215154973822 13.69279827089337, -89.21091623036649 13.69226224783862, -89.21063979057591 13.69235158501441, -89.21036335078534 13.6924409221902, -89.21008691099476 13.69253025936599, -89.20981047120419 13.69261959654179, -89.2093497382199 13.69279827089337, -89.20907329842932 13.69288760806916, -89.20879685863875 13.69297694524496, -89.20852041884817 13.69306628242075, -89.2082439790576 13.69315561959654, -89.20815183246073 13.69315561959654, -89.20805968586387 13.69315561959654, -89.20796753926702 13.69315561959654, -89.20787539267016 13.69315561959654, -89.2077832460733 13.69315561959654, -89.20769109947643 13.69315561959654, -89.20695392670157 13.69306628242075, -89.20630890052357 13.69297694524496, -89.20557172774869 13.69288760806916, -89.20492670157068 13.69279827089337, -89.20418952879581 13.69270893371758, -89.2035445026178 13.69261959654179, -89.20280732984293 13.69253025936599, -89.19976649214659 13.69288760806916, -89.19912146596859 13.69315561959654, -89.19847643979058 13.69342363112392, -89.19829214659686 13.69351296829971, -89.19764712041885 13.69378097982709, -89.19700209424084 13.69404899135447, -89.19681780104712 13.69413832853026, -89.19617277486911 13.69440634005764, -89.19552774869111 13.69467435158501, -89.19534345549738 13.69476368876081, -89.19469842931937 13.69503170028818, -89.19405340314137 13.69529971181556, -89.19386910994764 13.69538904899135, -89.19322408376964 13.69565706051873, -89.19303979057591 13.69574639769452, -89.1923947643979 13.6960144092219, -89.1917497382199 13.69628242074928, -89.19156544502617 13.69637175792507, -89.19092041884817 13.69663976945245, -89.19027539267016 13.69690778097983, -89.19009109947643 13.69699711815562, -89.18944607329843 13.697265129683, -89.18880104712042 13.69753314121037, -89.18861675392669 13.69762247838617, -89.18797172774869 13.69789048991355, -89.18732670157068 13.69815850144092, -89.18714240837696 13.69824783861671, -89.18668167539266 13.6984265129683, -89.18658952879581 13.6984265129683, -89.18649738219895 13.6984265129683, -89.18640523560209 13.6984265129683, -89.18631308900524 13.6984265129683, -89.18566806282722 13.69815850144092, -89.18502303664921 13.69789048991355, -89.18456230366492 13.69771181556196, -89.18391727748691 13.69744380403458, -89.18327225130889 13.6971757925072, -89.18262722513089 13.69690778097983, -89.1790335078534 13.69672910662824, -89.17875706806282 13.69681844380403, -89.17848062827225 13.69690778097983, -89.17820418848167 13.69699711815562, -89.1779277486911 13.69708645533141, -89.17765130890052 13.6971757925072, -89.17700628272252 13.69735446685879, -89.17672984293193 13.69744380403458, -89.17645340314137 13.69753314121037, -89.17617696335078 13.69762247838617, -89.1759005235602 13.69771181556196, -89.17562408376963 13.69780115273775, -89.17534764397905 13.69789048991355, -89.17470261780105 13.69806916426513, -89.1725832460733 13.69931988472622, -89.17249109947643 13.69940922190202, -89.17193821989528 13.70003458213256, 
-89.17184607329843 13.70012391930836, -89.17175392670157 13.70021325648415, -89.17120104712042 13.7008386167147, -89.17110890052356 13.70092795389049, -89.17101675392669 13.70101729106628, -89.17092460732984 13.70110662824207, -89.17037172774869 13.70173198847262, -89.17027958115183 13.70182132564842, -89.17018743455498 13.70191066282421, -89.17009528795812 13.702, -89.17000314136125 13.702, -89.16991099476439 13.702, -89.16981884816754 13.702, -89.16972670157068 13.702, -89.16963455497383 13.702, -89.16954240837696 13.702, -89.1694502617801 13.702, -89.16935811518324 13.702, -89.16926596858639 13.702, -89.16917382198953 13.702, -89.16908167539266 13.702, -89.16898952879581 13.702, -89.16889738219895 13.702, -89.16880523560209 13.702, -89.16871308900524 13.702, -89.16862094240837 13.702, -89.16852879581151 13.702, -89.16843664921466 13.702, -89.1683445026178 13.702, -89.16825235602094 13.702, -89.16816020942409 13.702, -89.16806806282722 13.702, -89.16797591623036 13.702, -89.16788376963351 13.702, -89.16779162303665 13.702, -89.16769947643979 13.702, -89.16760732984292 13.702, -89.16751518324607 13.702, -89.16742303664921 13.702, -89.16733089005236 13.702, -89.1672387434555 13.702, -89.16714659685863 13.702, -89.16705445026177 13.702, -89.16696230366492 13.702, -89.16687015706806 13.702, -89.16677801047121 13.702, -89.16668586387435 13.702, -89.16659371727748 13.702, -89.16650157068062 13.702, -89.16640942408377 13.702, -89.16631727748691 13.702, -89.16622513089006 13.702, -89.16613298429318 13.702, -89.16604083769633 13.702, -89.16594869109947 13.702, -89.16585654450262 13.702, -89.16576439790576 13.702, -89.16567225130889 13.702, -89.16558010471203 13.702, -89.16548795811518 13.702, -89.16539581151832 13.702, -89.16530366492147 13.702, -89.16521151832461 13.702, -89.16511937172774 13.702, -89.16502722513088 13.702, -89.16493507853403 13.702, -89.16484293193717 13.702, -89.16475078534032 13.702, -89.16465863874345 13.702, -89.16456649214659 13.702, -89.16447434554973 13.702, -89.16438219895288 13.702, -89.16429005235602 13.702, -89.16419790575915 13.702, -89.1641057591623 13.702, -89.16401361256544 13.702, -89.16392146596858 13.702, -89.16382931937173 13.702, -89.16373717277487 13.702, -89.163645026178 13.702, -89.16355287958115 13.702, -89.16346073298429 13.702, -89.16336858638743 13.702, -89.16327643979058 13.702, -89.16318429319371 13.702, -89.16309214659685 13.702, -89.163 13.702, -89.163 13.70191066282421, -89.163 13.70182132564842, -89.163 13.70173198847262, -89.163 13.70164265129683, -89.163 13.70155331412104, -89.163 13.70146397694525, -89.163 13.70137463976945, -89.163 13.70128530259366, -89.163 13.70119596541787, -89.163 13.70110662824207, -89.163 13.70101729106628, -89.163 13.70092795389049, -89.163 13.7008386167147, -89.163 13.7007492795389, -89.163 13.70065994236311, -89.163 13.70057060518732, -89.163 13.70048126801153, -89.163 13.70039193083574, -89.163 13.70030259365994, -89.163 13.70021325648415, -89.163 13.70012391930836, -89.163 13.70003458213256, -89.163 13.69994524495677, -89.163 13.69985590778098, -89.163 13.69976657060519, -89.163 13.69967723342939, -89.163 13.6995878962536, -89.163 13.69949855907781, -89.163 13.69940922190202, -89.163 13.69931988472622, -89.163 13.69923054755043, -89.163 13.69914121037464, -89.163 13.69905187319885, -89.163 13.69896253602306, -89.163 13.69887319884726, -89.163 13.69878386167147, -89.163 13.69869452449568, -89.163 13.69860518731988, -89.163 13.69851585014409, -89.163 13.6984265129683, -89.163 13.69833717579251, -89.163 
13.69824783861671, -89.163 13.69815850144092, -89.163 13.69806916426513, -89.163 13.69797982708934, -89.163 13.69789048991355, -89.163 13.69780115273775, -89.163 13.69771181556196, -89.163 13.69762247838617, -89.163 13.69753314121037, -89.163 13.69744380403458, -89.163 13.69735446685879, -89.163 13.697265129683, -89.163 13.6971757925072, -89.163 13.69708645533141, -89.163 13.69699711815562, -89.163 13.69690778097983, -89.163 13.69681844380403, -89.163 13.69672910662824, -89.163 13.69663976945245, -89.163 13.69655043227666, -89.163 13.69646109510087, -89.163 13.69637175792507, -89.163 13.69628242074928, -89.163 13.69619308357349, -89.163 13.69610374639769, -89.163 13.6960144092219, -89.163 13.69592507204611, -89.163 13.69583573487032, -89.163 13.69574639769452, -89.163 13.69565706051873, -89.163 13.69556772334294, -89.163 13.69547838616715, -89.163 13.69538904899135, -89.163 13.69529971181556, -89.163 13.69521037463977, -89.163 13.69512103746398, -89.163 13.69503170028818, -89.163 13.69494236311239, -89.163 13.6948530259366, -89.163 13.69476368876081, -89.163 13.69467435158501, -89.163 13.69458501440922, -89.163 13.69449567723343, -89.163 13.69440634005764, -89.163 13.69431700288184, -89.163 13.69422766570605, -89.163 13.69413832853026, -89.163 13.69404899135447, -89.163 13.69395965417867, -89.163 13.69387031700288, -89.163 13.69378097982709, -89.163 13.6936916426513, -89.163 13.6936023054755, -89.163 13.69351296829971, -89.163 13.69342363112392, -89.163 13.69333429394813, -89.163 13.69324495677233, -89.16327643979058 13.69306628242075, -89.16355287958115 13.69288760806916, -89.16382931937173 13.69270893371758, -89.1641057591623 13.69253025936599, -89.16438219895288 13.69235158501441, -89.16465863874345 13.69217291066282, -89.16493507853403 13.69199423631124, -89.16521151832461 13.69181556195965, -89.16548795811518 13.69163688760807, -89.16576439790576 13.69145821325648, -89.16604083769633 13.6912795389049, -89.16631727748691 13.69110086455331, -89.16659371727748 13.69092219020173, -89.16677801047121 13.69083285302594, -89.16696230366492 13.69074351585014, -89.16714659685863 13.69065417867435, -89.16733089005236 13.69056484149856, -89.16797591623036 13.69020749279539, -89.16816020942409 13.6901181556196, -89.1683445026178 13.6900288184438, -89.16852879581151 13.68993948126801, -89.16871308900524 13.68985014409222, -89.16889738219895 13.68976080691643, -89.16908167539266 13.68967146974063, -89.16972670157068 13.68931412103746, -89.16991099476439 13.68922478386167, -89.17009528795812 13.68913544668588, -89.17027958115183 13.68904610951009, -89.17046387434554 13.68895677233429, -89.17064816753927 13.6888674351585, -89.17083246073298 13.68877809798271, -89.17147748691099 13.68842074927954, -89.17166178010471 13.68833141210375, -89.17350471204188 13.68681268011527, -89.17378115183246 13.6864553314121, -89.17405759162304 13.68609798270893, -89.17442617801046 13.68565129682997, -89.17470261780105 13.6852939481268, -89.17507120418848 13.68484726224784, -89.17534764397905 13.68448991354467, -89.17571623036649 13.68404322766571, -89.17580837696335 13.68395389048991, -89.17599267015707 13.68386455331412, -89.17617696335078 13.68377521613833, -89.1763612565445 13.68368587896254, -89.17654554973822 13.68359654178674, -89.17672984293193 13.68350720461095, -89.17691413612565 13.68341786743516, -89.17709842931937 13.68332853025936, -89.17728272251308 13.68323919308357, -89.17746701570681 13.68314985590778, -89.17765130890052 13.68306051873199, -89.17783560209423 13.6829711815562, -89.17801989528796 
13.6828818443804, -89.17820418848167 13.68279250720461, -89.17884921465968 13.68252449567723, -89.1790335078534 13.68243515850144, -89.17921780104712 13.68234582132565, -89.17940209424084 13.68225648414986, -89.17958638743455 13.68216714697406, -89.17977068062827 13.68207780979827, -89.17995497382199 13.68198847262248, -89.1801392670157 13.68189913544669, -89.18032356020942 13.68180979827089, -89.18050785340314 13.6817204610951, -89.18069214659685 13.68163112391931, -89.18087643979058 13.68154178674352, -89.18152146596859 13.68127377521614, -89.1817057591623 13.68118443804035, -89.18189005235602 13.68109510086455, -89.18198219895288 13.68109510086455, -89.18207434554974 13.68109510086455, -89.18216649214659 13.68109510086455, -89.18225863874345 13.68109510086455, -89.18235078534032 13.68109510086455, -89.18244293193717 13.68109510086455, -89.18253507853403 13.68109510086455, -89.18262722513089 13.68109510086455, -89.18271937172774 13.68109510086455, -89.18281151832461 13.68109510086455, -89.18290366492147 13.68109510086455, -89.18299581151832 13.68109510086455, -89.18308795811518 13.68109510086455, -89.18318010471204 13.68109510086455, -89.18327225130889 13.68109510086455, -89.18336439790576 13.68109510086455, -89.18345654450262 13.68109510086455, -89.18354869109947 13.68109510086455, -89.18364083769633 13.68109510086455, -89.18373298429319 13.68109510086455, -89.18382513089006 13.68109510086455, -89.18806387434554 13.67868299711816, -89.18843246073298 13.67805763688761, -89.18880104712042 13.67743227665706, -89.18889319371728 13.67725360230548, -89.18926178010472 13.67662824207493, -89.18963036649214 13.67600288184438, -89.18972251308901 13.67582420749279, -89.19009109947643 13.67519884726225, -89.19045968586387 13.6745734870317, -89.19055183246073 13.67439481268011, -89.19092041884817 13.67376945244957, -89.19119685863875 13.6733227665706, -89.19138115183246 13.67323342939481, -89.19156544502617 13.67314409221902, -89.1917497382199 13.67305475504323, -89.1923947643979 13.67278674351585, -89.19257905759163 13.67269740634006, -89.19276335078534 13.67260806916426, -89.19294764397905 13.67251873198847, -89.19313193717278 13.67242939481268, -89.19331623036649 13.67234005763689, -89.1935005235602 13.6722507204611, -89.19368481675393 13.6721613832853, -89.19386910994764 13.67207204610951, -89.19405340314137 13.67198270893372, -89.19469842931937 13.67171469740634, -89.19488272251309 13.67162536023055, -89.19506701570681 13.67153602305475, -89.19525130890052 13.67144668587896, -89.19543560209424 13.67135734870317, -89.19561989528796 13.67126801152738, -89.19580418848167 13.67117867435158, -89.19598848167539 13.67108933717579, -89.19617277486911 13.671, -89.19626492146597 13.671, -89.19635706806282 13.671, -89.19644921465968 13.671, -89.19654136125655 13.671, -89.19663350785341 13.671, -89.19672565445026 13.671, -89.19681780104712 13.671, -89.19690994764397 13.671, -89.19700209424084 13.671, -89.1970942408377 13.671, -89.19718638743456 13.671, -89.19727853403141 13.671, -89.19737068062827 13.671, -89.19746282722512 13.671, -89.19755497382199 13.671, -89.19764712041885 13.671, -89.19773926701571 13.671, -89.19783141361256 13.671, -89.19792356020942 13.671, -89.19801570680629 13.671, -89.19810785340314 13.671, -89.1982 13.671, -89.19829214659686 13.671, -89.19838429319371 13.671, -89.19847643979058 13.671, -89.19856858638744 13.671, -89.19866073298429 13.671, -89.19875287958115 13.671, -89.19884502617801 13.671, -89.19893717277488 13.671, -89.19902931937173 13.671, -89.19912146596859 13.671, 
-89.19921361256544 13.671, -89.1993057591623 13.671, -89.19939790575916 13.671, -89.19949005235603 13.671, -89.19958219895288 13.671, -89.19967434554974 13.671, -89.19976649214659 13.671, -89.19985863874345 13.671, -89.19995078534032 13.671, -89.20004293193718 13.671, -89.20013507853403 13.671, -89.20022722513089 13.671, -89.20031937172774 13.671, -89.20041151832461 13.671, -89.20050366492147 13.671, -89.20059581151833 13.671, -89.20068795811518 13.671, -89.20078010471204 13.671, -89.20087225130889 13.671, -89.20096439790576 13.671, -89.20105654450262 13.671, -89.20114869109948 13.671, -89.20124083769633 13.671, -89.20133298429319 13.671, -89.20142513089006 13.671, -89.20151727748691 13.671, -89.20160942408377 13.671, -89.20170157068063 13.671, -89.20179371727748 13.671, -89.20188586387435 13.671, -89.20197801047121 13.671, -89.20207015706806 13.671, -89.20216230366492 13.671, -89.20225445026178 13.671, -89.20234659685863 13.671, -89.2024387434555 13.671, -89.20253089005236 13.671, -89.20262303664921 13.671, -89.20271518324607 13.671, -89.20280732984293 13.671, -89.2028994764398 13.671, -89.20299162303665 13.671, -89.20308376963351 13.671, -89.20317591623036 13.671, -89.20326806282722 13.671, -89.20336020942409 13.671, -89.20345235602095 13.671, -89.2035445026178 13.671, -89.20363664921466 13.671, -89.20372879581151 13.671, -89.20382094240837 13.671, -89.20391308900524 13.671, -89.2040052356021 13.671, -89.20409738219895 13.671, -89.20418952879581 13.671, -89.20428167539266 13.671, -89.20437382198953 13.671, -89.20446596858639 13.671, -89.20455811518325 13.671, -89.2046502617801 13.671, -89.20474240837696 13.671, -89.20483455497383 13.671, -89.20492670157068 13.671, -89.20501884816754 13.671, -89.2051109947644 13.671, -89.20520314136125 13.671, -89.20529528795812 13.671, -89.20538743455498 13.671, -89.20547958115183 13.671, -89.20557172774869 13.671, -89.20566387434555 13.671, -89.2057560209424 13.671, -89.20584816753927 13.671, -89.20594031413613 13.671, -89.20603246073298 13.671, -89.20612460732984 13.671, -89.2062167539267 13.671, -89.20640104712042 13.67108933717579, -89.20658534031413 13.67117867435158, -89.20676963350786 13.67126801152738, -89.20695392670157 13.67135734870317, -89.20713821989528 13.67144668587896, -89.20723036649214 13.67153602305475, -89.20723036649214 13.67162536023055, -89.20723036649214 13.67171469740634, -89.20723036649214 13.67180403458213, -89.20704607329843 13.67242939481268, -89.20686178010472 13.67305475504323, -89.20667748691099 13.67368011527378, -89.20658534031413 13.67394812680115, -89.20640104712042 13.6745734870317, -89.21036335078534 13.68118443804035, -89.21091623036649 13.68127377521614, -89.21146910994764 13.68136311239193, -89.21211413612565 13.68145244956772, -89.21266701570681 13.68154178674352, -89.21321989528796 13.68163112391931, -89.21377277486911 13.6817204610951, -89.21441780104712 13.68180979827089, -89.2146942408377 13.68189913544669, -89.21487853403141 13.68198847262248, -89.215154973822 13.68216714697406, -89.21543141361256 13.68234582132565, -89.21589214659686 13.68261383285303, -89.21616858638744 13.68279250720461, -89.21644502617801 13.6829711815562, -89.21672146596859 13.68314985590778, -89.21699790575916 13.68332853025936, -89.21727434554974 13.68350720461095, -89.21773507853403 13.68377521613833, -89.21801151832462 13.68395389048991, -89.21828795811518 13.6841325648415, -89.21856439790577 13.68431123919308, -89.21884083769633 13.68448991354467, -89.21911727748692 13.68466858789625, -89.22031518324607 13.68520461095101, 
-89.22059162303665 13.6852939481268, -89.22086806282722 13.68538328530259, -89.2211445026178 13.68547262247839, -89.22142094240837 13.68556195965418, -89.22169738219895 13.68565129682997, -89.22197382198954 13.68574063400576, -89.2222502617801 13.68582997118156, -89.22252670157069 13.68591930835735, -89.22280314136125 13.68600864553314, -89.22307958115184 13.68609798270893, -89.2233560209424 13.68618731988473, -89.22363246073299 13.68627665706052, -89.22390890052357 13.68636599423631, -89.22768691099476 13.68618731988473, -89.22833193717278 13.68591930835735, -89.22851623036649 13.68582997118156, -89.22870052356021 13.68574063400576, -89.22888481675393 13.68565129682997, -89.22906910994764 13.68556195965418, -89.22925340314137 13.68547262247839, -89.22943769633508 13.68538328530259, -89.22962198952879 13.6852939481268, -89.22980628272252 13.68520461095101, -89.22999057591623 13.68511527377522, -89.23017486910994 13.68502593659942, -89.23035916230367 13.68493659942363, -89.23054345549738 13.68484726224784, -89.23072774869111 13.68475792507205, -89.23091204188482 13.68466858789625, -89.23109633507853 13.68457925072046, -89.23174136125655 13.68431123919308, -89.23192565445027 13.68422190201729, -89.23358429319372 13.6828818443804, -89.23413717277488 13.68225648414986, -89.23469005235603 13.68163112391931, -89.23478219895289 13.68154178674352, -89.23533507853404 13.68091642651297, -89.23588795811519 13.68029106628242, -89.23598010471204 13.68020172910663, -89.2360722513089 13.68011239193084, -89.23616439790577 13.68002305475504, -89.23625654450262 13.67993371757925, -89.23634869109948 13.67984438040346, -89.23644083769634 13.67975504322767, -89.23653298429319 13.67966570605187, -89.23662513089006 13.67957636887608, -89.23671727748692 13.67948703170029, -89.23680942408377 13.67939769452449, -89.23690157068063 13.6793083573487, -89.23699371727749 13.67921902017291, -89.23754659685864 13.67859365994236, -89.23763874345551 13.67850432276657, -89.23773089005236 13.67841498559078, -89.23782303664922 13.67832564841498, -89.23791518324607 13.67823631123919, -89.23800732984293 13.6781469740634, -89.2380994764398 13.67805763688761, -89.23819162303666 13.67796829971181, -89.23828376963351 13.67787896253602, -89.23837591623037 13.67778962536023, -89.23846806282722 13.67770028818444, -89.23856020942409 13.67761095100865, -89.23865235602095 13.67752161383285, -89.23874450261781 13.67743227665706, -89.23883664921466 13.67734293948127, -89.23938952879581 13.67671757925072, -89.23948167539267 13.67662824207493, -89.23957382198954 13.67653890489913, -89.23966596858639 13.67644956772334, -89.23975811518325 13.67636023054755, -89.23985026178011 13.67627089337176, -89.23994240837696 13.67618155619597, -89.24003455497383 13.67609221902017, -89.24012670157069 13.67600288184438, -89.24021884816754 13.67591354466859, -89.2403109947644 13.67582420749279, -89.24040314136126 13.675734870317, -89.24049528795813 13.67564553314121, -89.24058743455498 13.67555619596542, -89.24114031413613 13.67493083573487, -89.24123246073299 13.67484149855908, -89.24132460732984 13.67475216138329, -89.2414167539267 13.67466282420749, -89.24150890052357 13.6745734870317, -89.24160104712043 13.67448414985591, -89.24169319371728 13.67439481268011, -89.24178534031414 13.67430547550432, -89.24298324607331 13.67251873198847, -89.24316753926702 13.67207204610951, -89.24335183246073 13.67162536023055, -89.24353612565446 13.67117867435158, -89.24362827225131 13.671, -89.24372041884817 13.671, -89.24381256544503 13.671, -89.24390471204188 13.671, 
-89.24399685863875 13.671, -89.24408900523561 13.671, -89.24418115183246 13.671, -89.24427329842932 13.671, -89.24436544502618 13.671, -89.24445759162305 13.671, -89.2445497382199 13.671, -89.24464188481676 13.671, -89.24473403141361 13.671, -89.24482617801047 13.671, -89.24491832460734 13.671, -89.2450104712042 13.671, -89.24510261780105 13.671, -89.24519476439791 13.671, -89.24528691099476 13.671, -89.24537905759163 13.671, -89.24547120418849 13.671, -89.24556335078535 13.671, -89.2456554973822 13.671, -89.24574764397906 13.671, -89.24583979057591 13.671, -89.24593193717278 13.671, -89.24602408376964 13.671, -89.2461162303665 13.671, -89.24620837696335 13.671, -89.24630052356021 13.671, -89.24639267015708 13.671, -89.24648481675393 13.671, -89.24657696335079 13.671, -89.24666910994765 13.671, -89.2467612565445 13.671, -89.24685340314137 13.671, -89.24694554973823 13.671, -89.24703769633508 13.671, -89.24712984293194 13.671, -89.2472219895288 13.671, -89.24731413612565 13.671, -89.24740628272252 13.671, -89.24749842931938 13.671, -89.24759057591623 13.671, -89.24768272251309 13.671, -89.24777486910995 13.671, -89.24786701570682 13.671, -89.24795916230367 13.671, -89.24805130890053 13.671, -89.24814345549738 13.671, -89.24823560209424 13.671, -89.24832774869111 13.671, -89.24841989528797 13.671, -89.24851204188482 13.671, -89.24860418848168 13.671, -89.24869633507853 13.671, -89.24878848167539 13.671, -89.24888062827226 13.671, -89.24897277486912 13.671, -89.24906492146597 13.671, -89.24915706806283 13.671, -89.24924921465968 13.671, -89.24934136125655 13.671, -89.24943350785341 13.671, -89.24952565445027 13.671, -89.24961780104712 13.671, -89.24970994764398 13.671, -89.24980209424085 13.671, -89.2498942408377 13.671, -89.24998638743456 13.671, -89.25007853403142 13.671, -89.25017068062827 13.671, -89.25026282722513 13.671, -89.250354973822 13.671, -89.25044712041885 13.671, -89.25053926701571 13.671, -89.25063141361257 13.671, -89.25072356020942 13.671, -89.25081570680629 13.671, -89.25090785340315 13.671))' + risk_shape = "POLYGON ((-89.25090785340315 13.671, -89.251 13.671, -89.251 13.67108933717579, -89.251 13.67117867435158, -89.251 13.67126801152738, -89.251 13.67135734870317, -89.251 13.67144668587896, -89.251 13.67153602305475, -89.251 13.67162536023055, -89.251 13.67171469740634, -89.251 13.67180403458213, -89.251 13.67189337175792, -89.251 13.67198270893372, -89.251 13.67207204610951, -89.251 13.6721613832853, -89.251 13.6722507204611, -89.251 13.67234005763689, -89.251 13.67242939481268, -89.251 13.67251873198847, -89.251 13.67260806916426, -89.251 13.67269740634006, -89.251 13.67278674351585, -89.251 13.67287608069164, -89.251 13.67296541786743, -89.251 13.67305475504323, -89.251 13.67314409221902, -89.251 13.67323342939481, -89.251 13.6733227665706, -89.251 13.6734121037464, -89.251 13.67350144092219, -89.251 13.67359077809798, -89.251 13.67368011527378, -89.251 13.67376945244957, -89.251 13.67385878962536, -89.251 13.67394812680115, -89.251 13.67403746397694, -89.251 13.67412680115274, -89.251 13.67421613832853, -89.251 13.67430547550432, -89.251 13.67439481268011, -89.251 13.67448414985591, -89.251 13.6745734870317, -89.251 13.67466282420749, -89.251 13.67475216138329, -89.251 13.67484149855908, -89.251 13.67493083573487, -89.251 13.67502017291066, -89.251 13.67510951008645, -89.251 13.67519884726225, -89.251 13.67528818443804, -89.251 13.67537752161383, -89.251 13.67546685878962, -89.251 13.67555619596542, -89.251 13.67564553314121, -89.251 13.675734870317, -89.251 
13.67582420749279, -89.251 13.67591354466859, -89.251 13.67600288184438, -89.251 13.67609221902017, -89.251 13.67618155619597, -89.251 13.67627089337176, -89.251 13.67636023054755, -89.251 13.67644956772334, -89.251 13.67653890489913, -89.251 13.67662824207493, -89.251 13.67671757925072, -89.251 13.67680691642651, -89.251 13.6768962536023, -89.251 13.6769855907781, -89.251 13.67707492795389, -89.251 13.67716426512968, -89.251 13.67725360230548, -89.251 13.67734293948127, -89.251 13.67743227665706, -89.251 13.67752161383285, -89.251 13.67761095100865, -89.251 13.67770028818444, -89.251 13.67778962536023, -89.251 13.67787896253602, -89.251 13.67796829971181, -89.251 13.67805763688761, -89.25090785340315 13.67832564841498, -89.25081570680629 13.67850432276657, -89.25072356020942 13.67868299711816, -89.25063141361257 13.67886167146974, -89.250354973822 13.67921902017291, -89.25017068062827 13.67948703170029, -89.2498942408377 13.67984438040346, -89.24961780104712 13.68020172910663, -89.24934136125655 13.6805590778098, -89.24915706806283 13.68082708933717, -89.24888062827226 13.68118443804035, -89.24860418848168 13.68154178674352, -89.24832774869111 13.68189913544669, -89.24814345549738 13.68216714697406, -89.24786701570682 13.68252449567723, -89.24759057591623 13.6828818443804, -89.24740628272252 13.68314985590778, -89.24712984293194 13.68350720461095, -89.24685340314137 13.68386455331412, -89.24657696335079 13.68422190201729, -89.24639267015708 13.68448991354467, -89.24556335078535 13.68556195965418, -89.24510261780105 13.68609798270893, -89.2450104712042 13.68618731988473, -89.24491832460734 13.68627665706052, -89.24436544502618 13.68690201729107, -89.24427329842932 13.68699135446686, -89.24372041884817 13.68761671469741, -89.24362827225131 13.6877060518732, -89.24353612565446 13.68779538904899, -89.24298324607331 13.68842074927954, -89.24289109947644 13.68851008645533, -89.24233821989529 13.68913544668588, -89.24224607329843 13.68922478386167, -89.24169319371728 13.68985014409222, -89.24160104712043 13.68993948126801, -89.24150890052357 13.6900288184438, -89.24095602094241 13.69065417867435, -89.24086387434555 13.69074351585014, -89.24077172774869 13.69083285302594, -89.24067958115184 13.69092219020173, -89.24058743455498 13.69101152737752, -89.24049528795813 13.69110086455331, -89.24040314136126 13.69119020172911, -89.2403109947644 13.6912795389049, -89.24021884816754 13.69136887608069, -89.23975811518325 13.69163688760807, -89.23929738219896 13.69190489913545, -89.23865235602095 13.69226224783862, -89.23819162303666 13.69253025936599, -89.23773089005236 13.69279827089337, -89.23708586387436 13.69315561959654, -89.23662513089006 13.69342363112392, -89.23616439790577 13.6936916426513, -89.23570366492147 13.69395965417867, -89.23505863874345 13.69431700288184, -89.23459790575916 13.69458501440922, -89.23413717277488 13.6948530259366, -89.23376858638744 13.69494236311239, -89.23321570680629 13.69503170028818, -89.23266282722513 13.69512103746398, -89.23220209424085 13.69521037463977, -89.23164921465968 13.69529971181556, -89.23109633507853 13.69538904899135, -89.23054345549738 13.69547838616715, -89.23008272251309 13.69556772334294, -89.22952984293194 13.69565706051873, -89.22897696335079 13.69574639769452, -89.22851623036649 13.69583573487032, -89.22741047120419 13.6960144092219, -89.22685759162304 13.69610374639769, -89.22621256544502 13.69619308357349, -89.22612041884817 13.69619308357349, -89.22602827225131 13.69619308357349, -89.22593612565446 13.69619308357349, -89.2258439790576 
13.69619308357349, -89.22575183246073 13.69619308357349, -89.22565968586387 13.69619308357349, -89.22556753926702 13.69619308357349, -89.22547539267016 13.69619308357349, -89.22538324607331 13.69619308357349, -89.22529109947644 13.69619308357349, -89.22519895287958 13.69619308357349, -89.22510680628272 13.69619308357349, -89.22501465968587 13.69619308357349, -89.22492251308901 13.69619308357349, -89.22483036649214 13.69619308357349, -89.22473821989529 13.69619308357349, -89.22464607329843 13.69619308357349, -89.22455392670157 13.69619308357349, -89.22446178010472 13.69619308357349, -89.22436963350786 13.69619308357349, -89.22427748691099 13.69619308357349, -89.22418534031414 13.69619308357349, -89.22409319371728 13.69619308357349, -89.22400104712042 13.69619308357349, -89.22390890052357 13.69619308357349, -89.2238167539267 13.69619308357349, -89.22372460732984 13.69619308357349, -89.22363246073299 13.69619308357349, -89.22354031413613 13.69619308357349, -89.22344816753927 13.69619308357349, -89.2233560209424 13.69619308357349, -89.22326387434555 13.69619308357349, -89.22317172774869 13.69619308357349, -89.2222502617801 13.69628242074928, -89.22215811518325 13.69628242074928, -89.22206596858639 13.69628242074928, -89.22197382198954 13.69628242074928, -89.22188167539267 13.69628242074928, -89.22178952879581 13.69628242074928, -89.22169738219895 13.69628242074928, -89.2216052356021 13.69628242074928, -89.22151308900524 13.69628242074928, -89.22142094240837 13.69628242074928, -89.22132879581152 13.69628242074928, -89.22123664921466 13.69628242074928, -89.2211445026178 13.69628242074928, -89.22105235602095 13.69628242074928, -89.22096020942409 13.69628242074928, -89.22086806282722 13.69628242074928, -89.22077591623037 13.69628242074928, -89.22059162303665 13.69619308357349, -89.22031518324607 13.6960144092219, -89.21985445026178 13.69574639769452, -89.21957801047121 13.69556772334294, -89.21930157068063 13.69538904899135, -89.21902513089006 13.69521037463977, -89.21874869109948 13.69503170028818, -89.21828795811518 13.69476368876081, -89.21801151832462 13.69458501440922, -89.21773507853403 13.69440634005764, -89.21745863874345 13.69422766570605, -89.21699790575916 13.69395965417867, -89.21672146596859 13.69378097982709, -89.21644502617801 13.6936023054755, -89.21616858638744 13.69342363112392, -89.21589214659686 13.69324495677233, -89.21543141361256 13.69297694524496, -89.215154973822 13.69279827089337, -89.21091623036649 13.69226224783862, -89.21063979057591 13.69235158501441, -89.21036335078534 13.6924409221902, -89.21008691099476 13.69253025936599, -89.20981047120419 13.69261959654179, -89.2093497382199 13.69279827089337, -89.20907329842932 13.69288760806916, -89.20879685863875 13.69297694524496, -89.20852041884817 13.69306628242075, -89.2082439790576 13.69315561959654, -89.20815183246073 13.69315561959654, -89.20805968586387 13.69315561959654, -89.20796753926702 13.69315561959654, -89.20787539267016 13.69315561959654, -89.2077832460733 13.69315561959654, -89.20769109947643 13.69315561959654, -89.20695392670157 13.69306628242075, -89.20630890052357 13.69297694524496, -89.20557172774869 13.69288760806916, -89.20492670157068 13.69279827089337, -89.20418952879581 13.69270893371758, -89.2035445026178 13.69261959654179, -89.20280732984293 13.69253025936599, -89.19976649214659 13.69288760806916, -89.19912146596859 13.69315561959654, -89.19847643979058 13.69342363112392, -89.19829214659686 13.69351296829971, -89.19764712041885 13.69378097982709, -89.19700209424084 13.69404899135447, 
-89.19681780104712 13.69413832853026, -89.19617277486911 13.69440634005764, -89.19552774869111 13.69467435158501, -89.19534345549738 13.69476368876081, -89.19469842931937 13.69503170028818, -89.19405340314137 13.69529971181556, -89.19386910994764 13.69538904899135, -89.19322408376964 13.69565706051873, -89.19303979057591 13.69574639769452, -89.1923947643979 13.6960144092219, -89.1917497382199 13.69628242074928, -89.19156544502617 13.69637175792507, -89.19092041884817 13.69663976945245, -89.19027539267016 13.69690778097983, -89.19009109947643 13.69699711815562, -89.18944607329843 13.697265129683, -89.18880104712042 13.69753314121037, -89.18861675392669 13.69762247838617, -89.18797172774869 13.69789048991355, -89.18732670157068 13.69815850144092, -89.18714240837696 13.69824783861671, -89.18668167539266 13.6984265129683, -89.18658952879581 13.6984265129683, -89.18649738219895 13.6984265129683, -89.18640523560209 13.6984265129683, -89.18631308900524 13.6984265129683, -89.18566806282722 13.69815850144092, -89.18502303664921 13.69789048991355, -89.18456230366492 13.69771181556196, -89.18391727748691 13.69744380403458, -89.18327225130889 13.6971757925072, -89.18262722513089 13.69690778097983, -89.1790335078534 13.69672910662824, -89.17875706806282 13.69681844380403, -89.17848062827225 13.69690778097983, -89.17820418848167 13.69699711815562, -89.1779277486911 13.69708645533141, -89.17765130890052 13.6971757925072, -89.17700628272252 13.69735446685879, -89.17672984293193 13.69744380403458, -89.17645340314137 13.69753314121037, -89.17617696335078 13.69762247838617, -89.1759005235602 13.69771181556196, -89.17562408376963 13.69780115273775, -89.17534764397905 13.69789048991355, -89.17470261780105 13.69806916426513, -89.1725832460733 13.69931988472622, -89.17249109947643 13.69940922190202, -89.17193821989528 13.70003458213256, -89.17184607329843 13.70012391930836, -89.17175392670157 13.70021325648415, -89.17120104712042 13.7008386167147, -89.17110890052356 13.70092795389049, -89.17101675392669 13.70101729106628, -89.17092460732984 13.70110662824207, -89.17037172774869 13.70173198847262, -89.17027958115183 13.70182132564842, -89.17018743455498 13.70191066282421, -89.17009528795812 13.702, -89.17000314136125 13.702, -89.16991099476439 13.702, -89.16981884816754 13.702, -89.16972670157068 13.702, -89.16963455497383 13.702, -89.16954240837696 13.702, -89.1694502617801 13.702, -89.16935811518324 13.702, -89.16926596858639 13.702, -89.16917382198953 13.702, -89.16908167539266 13.702, -89.16898952879581 13.702, -89.16889738219895 13.702, -89.16880523560209 13.702, -89.16871308900524 13.702, -89.16862094240837 13.702, -89.16852879581151 13.702, -89.16843664921466 13.702, -89.1683445026178 13.702, -89.16825235602094 13.702, -89.16816020942409 13.702, -89.16806806282722 13.702, -89.16797591623036 13.702, -89.16788376963351 13.702, -89.16779162303665 13.702, -89.16769947643979 13.702, -89.16760732984292 13.702, -89.16751518324607 13.702, -89.16742303664921 13.702, -89.16733089005236 13.702, -89.1672387434555 13.702, -89.16714659685863 13.702, -89.16705445026177 13.702, -89.16696230366492 13.702, -89.16687015706806 13.702, -89.16677801047121 13.702, -89.16668586387435 13.702, -89.16659371727748 13.702, -89.16650157068062 13.702, -89.16640942408377 13.702, -89.16631727748691 13.702, -89.16622513089006 13.702, -89.16613298429318 13.702, -89.16604083769633 13.702, -89.16594869109947 13.702, -89.16585654450262 13.702, -89.16576439790576 13.702, -89.16567225130889 13.702, -89.16558010471203 13.702, -89.16548795811518 
13.702, -89.16539581151832 13.702, -89.16530366492147 13.702, -89.16521151832461 13.702, -89.16511937172774 13.702, -89.16502722513088 13.702, -89.16493507853403 13.702, -89.16484293193717 13.702, -89.16475078534032 13.702, -89.16465863874345 13.702, -89.16456649214659 13.702, -89.16447434554973 13.702, -89.16438219895288 13.702, -89.16429005235602 13.702, -89.16419790575915 13.702, -89.1641057591623 13.702, -89.16401361256544 13.702, -89.16392146596858 13.702, -89.16382931937173 13.702, -89.16373717277487 13.702, -89.163645026178 13.702, -89.16355287958115 13.702, -89.16346073298429 13.702, -89.16336858638743 13.702, -89.16327643979058 13.702, -89.16318429319371 13.702, -89.16309214659685 13.702, -89.163 13.702, -89.163 13.70191066282421, -89.163 13.70182132564842, -89.163 13.70173198847262, -89.163 13.70164265129683, -89.163 13.70155331412104, -89.163 13.70146397694525, -89.163 13.70137463976945, -89.163 13.70128530259366, -89.163 13.70119596541787, -89.163 13.70110662824207, -89.163 13.70101729106628, -89.163 13.70092795389049, -89.163 13.7008386167147, -89.163 13.7007492795389, -89.163 13.70065994236311, -89.163 13.70057060518732, -89.163 13.70048126801153, -89.163 13.70039193083574, -89.163 13.70030259365994, -89.163 13.70021325648415, -89.163 13.70012391930836, -89.163 13.70003458213256, -89.163 13.69994524495677, -89.163 13.69985590778098, -89.163 13.69976657060519, -89.163 13.69967723342939, -89.163 13.6995878962536, -89.163 13.69949855907781, -89.163 13.69940922190202, -89.163 13.69931988472622, -89.163 13.69923054755043, -89.163 13.69914121037464, -89.163 13.69905187319885, -89.163 13.69896253602306, -89.163 13.69887319884726, -89.163 13.69878386167147, -89.163 13.69869452449568, -89.163 13.69860518731988, -89.163 13.69851585014409, -89.163 13.6984265129683, -89.163 13.69833717579251, -89.163 13.69824783861671, -89.163 13.69815850144092, -89.163 13.69806916426513, -89.163 13.69797982708934, -89.163 13.69789048991355, -89.163 13.69780115273775, -89.163 13.69771181556196, -89.163 13.69762247838617, -89.163 13.69753314121037, -89.163 13.69744380403458, -89.163 13.69735446685879, -89.163 13.697265129683, -89.163 13.6971757925072, -89.163 13.69708645533141, -89.163 13.69699711815562, -89.163 13.69690778097983, -89.163 13.69681844380403, -89.163 13.69672910662824, -89.163 13.69663976945245, -89.163 13.69655043227666, -89.163 13.69646109510087, -89.163 13.69637175792507, -89.163 13.69628242074928, -89.163 13.69619308357349, -89.163 13.69610374639769, -89.163 13.6960144092219, -89.163 13.69592507204611, -89.163 13.69583573487032, -89.163 13.69574639769452, -89.163 13.69565706051873, -89.163 13.69556772334294, -89.163 13.69547838616715, -89.163 13.69538904899135, -89.163 13.69529971181556, -89.163 13.69521037463977, -89.163 13.69512103746398, -89.163 13.69503170028818, -89.163 13.69494236311239, -89.163 13.6948530259366, -89.163 13.69476368876081, -89.163 13.69467435158501, -89.163 13.69458501440922, -89.163 13.69449567723343, -89.163 13.69440634005764, -89.163 13.69431700288184, -89.163 13.69422766570605, -89.163 13.69413832853026, -89.163 13.69404899135447, -89.163 13.69395965417867, -89.163 13.69387031700288, -89.163 13.69378097982709, -89.163 13.6936916426513, -89.163 13.6936023054755, -89.163 13.69351296829971, -89.163 13.69342363112392, -89.163 13.69333429394813, -89.163 13.69324495677233, -89.16327643979058 13.69306628242075, -89.16355287958115 13.69288760806916, -89.16382931937173 13.69270893371758, -89.1641057591623 13.69253025936599, -89.16438219895288 13.69235158501441, 
-89.16465863874345 13.69217291066282, -89.16493507853403 13.69199423631124, -89.16521151832461 13.69181556195965, -89.16548795811518 13.69163688760807, -89.16576439790576 13.69145821325648, -89.16604083769633 13.6912795389049, -89.16631727748691 13.69110086455331, -89.16659371727748 13.69092219020173, -89.16677801047121 13.69083285302594, -89.16696230366492 13.69074351585014, -89.16714659685863 13.69065417867435, -89.16733089005236 13.69056484149856, -89.16797591623036 13.69020749279539, -89.16816020942409 13.6901181556196, -89.1683445026178 13.6900288184438, -89.16852879581151 13.68993948126801, -89.16871308900524 13.68985014409222, -89.16889738219895 13.68976080691643, -89.16908167539266 13.68967146974063, -89.16972670157068 13.68931412103746, -89.16991099476439 13.68922478386167, -89.17009528795812 13.68913544668588, -89.17027958115183 13.68904610951009, -89.17046387434554 13.68895677233429, -89.17064816753927 13.6888674351585, -89.17083246073298 13.68877809798271, -89.17147748691099 13.68842074927954, -89.17166178010471 13.68833141210375, -89.17350471204188 13.68681268011527, -89.17378115183246 13.6864553314121, -89.17405759162304 13.68609798270893, -89.17442617801046 13.68565129682997, -89.17470261780105 13.6852939481268, -89.17507120418848 13.68484726224784, -89.17534764397905 13.68448991354467, -89.17571623036649 13.68404322766571, -89.17580837696335 13.68395389048991, -89.17599267015707 13.68386455331412, -89.17617696335078 13.68377521613833, -89.1763612565445 13.68368587896254, -89.17654554973822 13.68359654178674, -89.17672984293193 13.68350720461095, -89.17691413612565 13.68341786743516, -89.17709842931937 13.68332853025936, -89.17728272251308 13.68323919308357, -89.17746701570681 13.68314985590778, -89.17765130890052 13.68306051873199, -89.17783560209423 13.6829711815562, -89.17801989528796 13.6828818443804, -89.17820418848167 13.68279250720461, -89.17884921465968 13.68252449567723, -89.1790335078534 13.68243515850144, -89.17921780104712 13.68234582132565, -89.17940209424084 13.68225648414986, -89.17958638743455 13.68216714697406, -89.17977068062827 13.68207780979827, -89.17995497382199 13.68198847262248, -89.1801392670157 13.68189913544669, -89.18032356020942 13.68180979827089, -89.18050785340314 13.6817204610951, -89.18069214659685 13.68163112391931, -89.18087643979058 13.68154178674352, -89.18152146596859 13.68127377521614, -89.1817057591623 13.68118443804035, -89.18189005235602 13.68109510086455, -89.18198219895288 13.68109510086455, -89.18207434554974 13.68109510086455, -89.18216649214659 13.68109510086455, -89.18225863874345 13.68109510086455, -89.18235078534032 13.68109510086455, -89.18244293193717 13.68109510086455, -89.18253507853403 13.68109510086455, -89.18262722513089 13.68109510086455, -89.18271937172774 13.68109510086455, -89.18281151832461 13.68109510086455, -89.18290366492147 13.68109510086455, -89.18299581151832 13.68109510086455, -89.18308795811518 13.68109510086455, -89.18318010471204 13.68109510086455, -89.18327225130889 13.68109510086455, -89.18336439790576 13.68109510086455, -89.18345654450262 13.68109510086455, -89.18354869109947 13.68109510086455, -89.18364083769633 13.68109510086455, -89.18373298429319 13.68109510086455, -89.18382513089006 13.68109510086455, -89.18806387434554 13.67868299711816, -89.18843246073298 13.67805763688761, -89.18880104712042 13.67743227665706, -89.18889319371728 13.67725360230548, -89.18926178010472 13.67662824207493, -89.18963036649214 13.67600288184438, -89.18972251308901 13.67582420749279, -89.19009109947643 
13.67519884726225, -89.19045968586387 13.6745734870317, -89.19055183246073 13.67439481268011, -89.19092041884817 13.67376945244957, -89.19119685863875 13.6733227665706, -89.19138115183246 13.67323342939481, -89.19156544502617 13.67314409221902, -89.1917497382199 13.67305475504323, -89.1923947643979 13.67278674351585, -89.19257905759163 13.67269740634006, -89.19276335078534 13.67260806916426, -89.19294764397905 13.67251873198847, -89.19313193717278 13.67242939481268, -89.19331623036649 13.67234005763689, -89.1935005235602 13.6722507204611, -89.19368481675393 13.6721613832853, -89.19386910994764 13.67207204610951, -89.19405340314137 13.67198270893372, -89.19469842931937 13.67171469740634, -89.19488272251309 13.67162536023055, -89.19506701570681 13.67153602305475, -89.19525130890052 13.67144668587896, -89.19543560209424 13.67135734870317, -89.19561989528796 13.67126801152738, -89.19580418848167 13.67117867435158, -89.19598848167539 13.67108933717579, -89.19617277486911 13.671, -89.19626492146597 13.671, -89.19635706806282 13.671, -89.19644921465968 13.671, -89.19654136125655 13.671, -89.19663350785341 13.671, -89.19672565445026 13.671, -89.19681780104712 13.671, -89.19690994764397 13.671, -89.19700209424084 13.671, -89.1970942408377 13.671, -89.19718638743456 13.671, -89.19727853403141 13.671, -89.19737068062827 13.671, -89.19746282722512 13.671, -89.19755497382199 13.671, -89.19764712041885 13.671, -89.19773926701571 13.671, -89.19783141361256 13.671, -89.19792356020942 13.671, -89.19801570680629 13.671, -89.19810785340314 13.671, -89.1982 13.671, -89.19829214659686 13.671, -89.19838429319371 13.671, -89.19847643979058 13.671, -89.19856858638744 13.671, -89.19866073298429 13.671, -89.19875287958115 13.671, -89.19884502617801 13.671, -89.19893717277488 13.671, -89.19902931937173 13.671, -89.19912146596859 13.671, -89.19921361256544 13.671, -89.1993057591623 13.671, -89.19939790575916 13.671, -89.19949005235603 13.671, -89.19958219895288 13.671, -89.19967434554974 13.671, -89.19976649214659 13.671, -89.19985863874345 13.671, -89.19995078534032 13.671, -89.20004293193718 13.671, -89.20013507853403 13.671, -89.20022722513089 13.671, -89.20031937172774 13.671, -89.20041151832461 13.671, -89.20050366492147 13.671, -89.20059581151833 13.671, -89.20068795811518 13.671, -89.20078010471204 13.671, -89.20087225130889 13.671, -89.20096439790576 13.671, -89.20105654450262 13.671, -89.20114869109948 13.671, -89.20124083769633 13.671, -89.20133298429319 13.671, -89.20142513089006 13.671, -89.20151727748691 13.671, -89.20160942408377 13.671, -89.20170157068063 13.671, -89.20179371727748 13.671, -89.20188586387435 13.671, -89.20197801047121 13.671, -89.20207015706806 13.671, -89.20216230366492 13.671, -89.20225445026178 13.671, -89.20234659685863 13.671, -89.2024387434555 13.671, -89.20253089005236 13.671, -89.20262303664921 13.671, -89.20271518324607 13.671, -89.20280732984293 13.671, -89.2028994764398 13.671, -89.20299162303665 13.671, -89.20308376963351 13.671, -89.20317591623036 13.671, -89.20326806282722 13.671, -89.20336020942409 13.671, -89.20345235602095 13.671, -89.2035445026178 13.671, -89.20363664921466 13.671, -89.20372879581151 13.671, -89.20382094240837 13.671, -89.20391308900524 13.671, -89.2040052356021 13.671, -89.20409738219895 13.671, -89.20418952879581 13.671, -89.20428167539266 13.671, -89.20437382198953 13.671, -89.20446596858639 13.671, -89.20455811518325 13.671, -89.2046502617801 13.671, -89.20474240837696 13.671, -89.20483455497383 13.671, -89.20492670157068 13.671, 
-89.20501884816754 13.671, -89.2051109947644 13.671, -89.20520314136125 13.671, -89.20529528795812 13.671, -89.20538743455498 13.671, -89.20547958115183 13.671, -89.20557172774869 13.671, -89.20566387434555 13.671, -89.2057560209424 13.671, -89.20584816753927 13.671, -89.20594031413613 13.671, -89.20603246073298 13.671, -89.20612460732984 13.671, -89.2062167539267 13.671, -89.20640104712042 13.67108933717579, -89.20658534031413 13.67117867435158, -89.20676963350786 13.67126801152738, -89.20695392670157 13.67135734870317, -89.20713821989528 13.67144668587896, -89.20723036649214 13.67153602305475, -89.20723036649214 13.67162536023055, -89.20723036649214 13.67171469740634, -89.20723036649214 13.67180403458213, -89.20704607329843 13.67242939481268, -89.20686178010472 13.67305475504323, -89.20667748691099 13.67368011527378, -89.20658534031413 13.67394812680115, -89.20640104712042 13.6745734870317, -89.21036335078534 13.68118443804035, -89.21091623036649 13.68127377521614, -89.21146910994764 13.68136311239193, -89.21211413612565 13.68145244956772, -89.21266701570681 13.68154178674352, -89.21321989528796 13.68163112391931, -89.21377277486911 13.6817204610951, -89.21441780104712 13.68180979827089, -89.2146942408377 13.68189913544669, -89.21487853403141 13.68198847262248, -89.215154973822 13.68216714697406, -89.21543141361256 13.68234582132565, -89.21589214659686 13.68261383285303, -89.21616858638744 13.68279250720461, -89.21644502617801 13.6829711815562, -89.21672146596859 13.68314985590778, -89.21699790575916 13.68332853025936, -89.21727434554974 13.68350720461095, -89.21773507853403 13.68377521613833, -89.21801151832462 13.68395389048991, -89.21828795811518 13.6841325648415, -89.21856439790577 13.68431123919308, -89.21884083769633 13.68448991354467, -89.21911727748692 13.68466858789625, -89.22031518324607 13.68520461095101, -89.22059162303665 13.6852939481268, -89.22086806282722 13.68538328530259, -89.2211445026178 13.68547262247839, -89.22142094240837 13.68556195965418, -89.22169738219895 13.68565129682997, -89.22197382198954 13.68574063400576, -89.2222502617801 13.68582997118156, -89.22252670157069 13.68591930835735, -89.22280314136125 13.68600864553314, -89.22307958115184 13.68609798270893, -89.2233560209424 13.68618731988473, -89.22363246073299 13.68627665706052, -89.22390890052357 13.68636599423631, -89.22768691099476 13.68618731988473, -89.22833193717278 13.68591930835735, -89.22851623036649 13.68582997118156, -89.22870052356021 13.68574063400576, -89.22888481675393 13.68565129682997, -89.22906910994764 13.68556195965418, -89.22925340314137 13.68547262247839, -89.22943769633508 13.68538328530259, -89.22962198952879 13.6852939481268, -89.22980628272252 13.68520461095101, -89.22999057591623 13.68511527377522, -89.23017486910994 13.68502593659942, -89.23035916230367 13.68493659942363, -89.23054345549738 13.68484726224784, -89.23072774869111 13.68475792507205, -89.23091204188482 13.68466858789625, -89.23109633507853 13.68457925072046, -89.23174136125655 13.68431123919308, -89.23192565445027 13.68422190201729, -89.23358429319372 13.6828818443804, -89.23413717277488 13.68225648414986, -89.23469005235603 13.68163112391931, -89.23478219895289 13.68154178674352, -89.23533507853404 13.68091642651297, -89.23588795811519 13.68029106628242, -89.23598010471204 13.68020172910663, -89.2360722513089 13.68011239193084, -89.23616439790577 13.68002305475504, -89.23625654450262 13.67993371757925, -89.23634869109948 13.67984438040346, -89.23644083769634 13.67975504322767, -89.23653298429319 13.67966570605187, 
-89.23662513089006 13.67957636887608, -89.23671727748692 13.67948703170029, -89.23680942408377 13.67939769452449, -89.23690157068063 13.6793083573487, -89.23699371727749 13.67921902017291, -89.23754659685864 13.67859365994236, -89.23763874345551 13.67850432276657, -89.23773089005236 13.67841498559078, -89.23782303664922 13.67832564841498, -89.23791518324607 13.67823631123919, -89.23800732984293 13.6781469740634, -89.2380994764398 13.67805763688761, -89.23819162303666 13.67796829971181, -89.23828376963351 13.67787896253602, -89.23837591623037 13.67778962536023, -89.23846806282722 13.67770028818444, -89.23856020942409 13.67761095100865, -89.23865235602095 13.67752161383285, -89.23874450261781 13.67743227665706, -89.23883664921466 13.67734293948127, -89.23938952879581 13.67671757925072, -89.23948167539267 13.67662824207493, -89.23957382198954 13.67653890489913, -89.23966596858639 13.67644956772334, -89.23975811518325 13.67636023054755, -89.23985026178011 13.67627089337176, -89.23994240837696 13.67618155619597, -89.24003455497383 13.67609221902017, -89.24012670157069 13.67600288184438, -89.24021884816754 13.67591354466859, -89.2403109947644 13.67582420749279, -89.24040314136126 13.675734870317, -89.24049528795813 13.67564553314121, -89.24058743455498 13.67555619596542, -89.24114031413613 13.67493083573487, -89.24123246073299 13.67484149855908, -89.24132460732984 13.67475216138329, -89.2414167539267 13.67466282420749, -89.24150890052357 13.6745734870317, -89.24160104712043 13.67448414985591, -89.24169319371728 13.67439481268011, -89.24178534031414 13.67430547550432, -89.24298324607331 13.67251873198847, -89.24316753926702 13.67207204610951, -89.24335183246073 13.67162536023055, -89.24353612565446 13.67117867435158, -89.24362827225131 13.671, -89.24372041884817 13.671, -89.24381256544503 13.671, -89.24390471204188 13.671, -89.24399685863875 13.671, -89.24408900523561 13.671, -89.24418115183246 13.671, -89.24427329842932 13.671, -89.24436544502618 13.671, -89.24445759162305 13.671, -89.2445497382199 13.671, -89.24464188481676 13.671, -89.24473403141361 13.671, -89.24482617801047 13.671, -89.24491832460734 13.671, -89.2450104712042 13.671, -89.24510261780105 13.671, -89.24519476439791 13.671, -89.24528691099476 13.671, -89.24537905759163 13.671, -89.24547120418849 13.671, -89.24556335078535 13.671, -89.2456554973822 13.671, -89.24574764397906 13.671, -89.24583979057591 13.671, -89.24593193717278 13.671, -89.24602408376964 13.671, -89.2461162303665 13.671, -89.24620837696335 13.671, -89.24630052356021 13.671, -89.24639267015708 13.671, -89.24648481675393 13.671, -89.24657696335079 13.671, -89.24666910994765 13.671, -89.2467612565445 13.671, -89.24685340314137 13.671, -89.24694554973823 13.671, -89.24703769633508 13.671, -89.24712984293194 13.671, -89.2472219895288 13.671, -89.24731413612565 13.671, -89.24740628272252 13.671, -89.24749842931938 13.671, -89.24759057591623 13.671, -89.24768272251309 13.671, -89.24777486910995 13.671, -89.24786701570682 13.671, -89.24795916230367 13.671, -89.24805130890053 13.671, -89.24814345549738 13.671, -89.24823560209424 13.671, -89.24832774869111 13.671, -89.24841989528797 13.671, -89.24851204188482 13.671, -89.24860418848168 13.671, -89.24869633507853 13.671, -89.24878848167539 13.671, -89.24888062827226 13.671, -89.24897277486912 13.671, -89.24906492146597 13.671, -89.24915706806283 13.671, -89.24924921465968 13.671, -89.24934136125655 13.671, -89.24943350785341 13.671, -89.24952565445027 13.671, -89.24961780104712 13.671, -89.24970994764398 13.671, 
-89.24980209424085 13.671, -89.2498942408377 13.671, -89.24998638743456 13.671, -89.25007853403142 13.671, -89.25017068062827 13.671, -89.25026282722513 13.671, -89.250354973822 13.671, -89.25044712041885 13.671, -89.25053926701571 13.671, -89.25063141361257 13.671, -89.25072356020942 13.671, -89.25081570680629 13.671, -89.25090785340315 13.671))"
     shape_poly = wkt.loads(risk_shape)
     shape = gpd.GeoDataFrame()
-    shape['geometry'] = [shape_poly]
-    shape.crs = 'epsg:4326'
+    shape["geometry"] = [shape_poly]
+    shape.crs = "epsg:4326"
     shape.to_crs(epsg=3857, inplace=True)
     ax = shape.plot(figsize=(10, 10), alpha=0.5)
     ax.set_xlim(-9943223.896891385, -9911000.065720687)
     ax.set_ylim(1530712.637786494, 1555600.2891258441)
     ctx.add_basemap(ax, zoom=12, url=ctx.providers.Stamen.Terrain)
-    rect = patches.Rectangle((-9931038.907412536, 1536570.51725147), 4354.653554389253,
-                             2941.9125608841423, linewidth=1, edgecolor='r', facecolor='none')
+    rect = patches.Rectangle(
+        (-9931038.907412536, 1536570.51725147),
+        4354.653554389253,
+        2941.9125608841423,
+        linewidth=1,
+        edgecolor="r",
+        facecolor="none",
+    )
     ax.add_patch(rect)
     ax.set_axis_off()
     fig = ax.get_figure()
-    ax.set_title('Metropolitan Area of San Salvador', fontsize=10)
+    ax.set_title("Metropolitan Area of San Salvador", fontsize=10)
     fig.tight_layout()
     return fig
 
-from climada.entity import Exposures, Entity
+
+from climada.entity import Entity, Exposures
 from climada.hazard import Hazard
+
 
 def load_entity():
-    ent_file = 'FL_entity_Acelhuate_houses.xlsx'
+    ent_file = "FL_entity_Acelhuate_houses.xlsx"
     ent = Entity.from_excel(ent_file)
     ent.exposures.set_geometry_points()
     ent.check()
     return ent
 
+
+import cartopy.crs as ccrs
 import matplotlib as mpl
 import matplotlib.pyplot as plt
 import numpy as np
-import cartopy.crs as ccrs
+
 
 def scale_bar(ax, length=None, location=(0.5, 0.05), linewidth=3):
     """
@@ -69,55 +81,75 @@ def scale_bar(ax, length=None, location=(0.5, 0.05), linewidth=3):
     (ie. 0.5 is the middle of the plot)
     linewidth is the thickness of the scalebar.
""" - #Get the limits of the axis in lat long + # Get the limits of the axis in lat long llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree()) - #Make tmc horizontally centred on the middle of the map, - #vertically at scale bar location + # Make tmc horizontally centred on the middle of the map, + # vertically at scale bar location sbllx = (llx1 + llx0) / 2 sblly = lly0 + (lly1 - lly0) * location[1] tmc = ccrs.TransverseMercator(sbllx, sblly) - #Get the extent of the plotted area in coordinates in metres + # Get the extent of the plotted area in coordinates in metres x0, x1, y0, y1 = ax.get_extent(tmc) - #Turn the specified scalebar location into coordinates in metres + # Turn the specified scalebar location into coordinates in metres sbx = x0 + (x1 - x0) * location[0] sby = y0 + (y1 - y0) * location[1] - #Calculate a scale bar length if none has been given - #(Theres probably a more pythonic way of rounding the number but this works) + # Calculate a scale bar length if none has been given + # (Theres probably a more pythonic way of rounding the number but this works) if not length: - length = (x1 - x0) / 5000 #in km - ndim = int(np.floor(np.log10(length))) #number of digits in number - length = round(length, -ndim) #round to 1sf - #Returns numbers starting with the list + length = (x1 - x0) / 5000 # in km + ndim = int(np.floor(np.log10(length))) # number of digits in number + length = round(length, -ndim) # round to 1sf + + # Returns numbers starting with the list def scale_number(x): - if str(x)[0] in ['1', '2', '5']: return int(x) - else: return scale_number(x - 10 ** ndim) + if str(x)[0] in ["1", "2", "5"]: + return int(x) + else: + return scale_number(x - 10**ndim) + length = scale_number(length) - #Generate the x coordinate for the ends of the scalebar + # Generate the x coordinate for the ends of the scalebar bar_xs = [sbx - length * 500, sbx + length * 500] - #Plot the scalebar - ax.plot(bar_xs, [sby, sby], transform=tmc, color='k', linewidth=linewidth) - #Plot the scalebar label - ax.text(sbx, sby, str(int(length*1000)) + ' m', transform=tmc, - horizontalalignment='center', verticalalignment='bottom') + # Plot the scalebar + ax.plot(bar_xs, [sby, sby], transform=tmc, color="k", linewidth=linewidth) + # Plot the scalebar label + ax.text( + sbx, + sby, + str(int(length * 1000)) + " m", + transform=tmc, + horizontalalignment="center", + verticalalignment="bottom", + ) + def plot_exposure_ss(exposures, point=None): if point is not None: - fig, ax = plt.subplots(figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator())) - ax.scatter(exposures.gdf[point:point+1].geometry[:].x, exposures.gdf[point:point+1].geometry[:].y, c='k', - marker='+', s=800) + fig, ax = plt.subplots( + figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator()) + ) + ax.scatter( + exposures.gdf[point : point + 1].geometry[:].x, + exposures.gdf[point : point + 1].geometry[:].y, + c="k", + marker="+", + s=800, + ) ax.set_xlim(-9931038.907412536, -9926684.253858147) ax.set_ylim(1536680.51725147, 1539512.429812354) else: # create new map for viviendas - cmap_viv = cm.get_cmap('autumn').resampled(4) - cmap_viv = mpl.colors.LinearSegmentedColormap.from_list('Custom cmap', - [cmap_viv(i) for i in range(cmap_viv.N)], cmap_viv.N) + cmap_viv = cm.get_cmap("autumn").resampled(4) + cmap_viv = mpl.colors.LinearSegmentedColormap.from_list( + "Custom cmap", [cmap_viv(i) for i in range(cmap_viv.N)], cmap_viv.N + ) # create new map for aups - cmap_aup = cm.get_cmap('winter').resampled(4) - cmap_aup = 
mpl.colors.LinearSegmentedColormap.from_list('Custom cmap', - [cmap_aup(i) for i in range(cmap_aup.N)], cmap_aup.N) + cmap_aup = cm.get_cmap("winter").resampled(4) + cmap_aup = mpl.colors.LinearSegmentedColormap.from_list( + "Custom cmap", [cmap_aup(i) for i in range(cmap_aup.N)], cmap_aup.N + ) # define the bins and normalize bounds_aup = np.array([6000, 8800, 10000, 12000, 14600]) @@ -125,34 +157,79 @@ def plot_exposure_ss(exposures, point=None): bounds_viv = np.array([7500, 11000, 16500, 33000, 56300]) norm_viv = mpl.colors.BoundaryNorm(bounds_viv, cmap_viv.N) - exp_merc_aup = exposures.gdf[exposures.gdf.category==1] - exp_merc_house = exposures.gdf[exposures.gdf.category==2] - - fig, ax = plt.subplots(figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator())) - clr_1 = ax.scatter(exp_merc_aup.geometry[:].x, exp_merc_aup.geometry[:].y, c=exp_merc_aup.value.values, - marker='+', s=25, cmap=cmap_aup, norm=norm_aup) - clr_2 = ax.scatter(exp_merc_house.geometry[:].x, exp_merc_house.geometry[:].y, c=exp_merc_house.value.values, - marker='o', s=8, cmap=cmap_viv, norm=norm_viv) + exp_merc_aup = exposures.gdf[exposures.gdf.category == 1] + exp_merc_house = exposures.gdf[exposures.gdf.category == 2] + + fig, ax = plt.subplots( + figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator()) + ) + clr_1 = ax.scatter( + exp_merc_aup.geometry[:].x, + exp_merc_aup.geometry[:].y, + c=exp_merc_aup.value.values, + marker="+", + s=25, + cmap=cmap_aup, + norm=norm_aup, + ) + clr_2 = ax.scatter( + exp_merc_house.geometry[:].x, + exp_merc_house.geometry[:].y, + c=exp_merc_house.value.values, + marker="o", + s=8, + cmap=cmap_viv, + norm=norm_viv, + ) lines_legend = [] text_legend = [] for i_col, x_col in enumerate(np.linspace(0, 1, 4)): - lines_legend.append(mpl.lines.Line2D(range(1), range(1), color='white', marker='o', markerfacecolor=cmap_viv(x_col))) - text_legend.append(str(bounds_viv[i_col]) + ' - ' + str(bounds_viv[i_col+1])) - legend1 = plt.legend(lines_legend, text_legend, numpoints=1, loc=4, title='no AUP housing') + lines_legend.append( + mpl.lines.Line2D( + range(1), + range(1), + color="white", + marker="o", + markerfacecolor=cmap_viv(x_col), + ) + ) + text_legend.append( + str(bounds_viv[i_col]) + " - " + str(bounds_viv[i_col + 1]) + ) + legend1 = plt.legend( + lines_legend, text_legend, numpoints=1, loc=4, title="no AUP housing" + ) lines_legend = [] text_legend = [] for i_col, x_col in enumerate(np.linspace(0, 1, 4)): - lines_legend.append(mpl.lines.Line2D(range(1), range(1), color=cmap_aup(x_col), marker='+', markerfacecolor=cmap_aup(x_col))) - text_legend.append(str(bounds_aup[i_col]) + ' - ' + str(bounds_aup[i_col+1])) - plt.legend(lines_legend, text_legend, numpoints=1, loc=3, title='AUP housing') + lines_legend.append( + mpl.lines.Line2D( + range(1), + range(1), + color=cmap_aup(x_col), + marker="+", + markerfacecolor=cmap_aup(x_col), + ) + ) + text_legend.append( + str(bounds_aup[i_col]) + " - " + str(bounds_aup[i_col + 1]) + ) + plt.legend(lines_legend, text_legend, numpoints=1, loc=3, title="AUP housing") plt.gca().add_artist(legend1) - ctx.add_basemap(ax, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, origin='upper') + ctx.add_basemap(ax, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, origin="upper") scale_bar(ax, 0.5, location=(0.93, 0.4), linewidth=2) - rect = patches.Rectangle((-9931033.307412536, 1536686.51725147), 4345.053554389253, - 2934.0125608841423, linewidth=2, edgecolor='r', facecolor='none', zorder=200) + rect = patches.Rectangle( + (-9931033.307412536, 
1536686.51725147), + 4345.053554389253, + 2934.0125608841423, + linewidth=2, + edgecolor="r", + facecolor="none", + zorder=200, + ) ax.add_patch(rect) ax.set_axis_off() if point is not None: @@ -162,37 +239,41 @@ def plot_exposure_ss(exposures, point=None): # fig.savefig('ss_points.png', format='png', bbox_inches='tight') return fig + def flooding_aup_if(impact_funcs): - mdd = impact_funcs.get_func('FL', 101).mdd - intensity = impact_funcs.get_func('FL', 101).intensity + mdd = impact_funcs.get_func("FL", 101).mdd + intensity = impact_funcs.get_func("FL", 101).intensity fig, ax = plt.subplots() - ax.set_xlabel('Intensity (m)') - ax.set_ylabel('Mean Damage Ratio (%)') - ax.set_title('Impact Function - AUP flooding') - ax.plot(intensity, mdd*100) + ax.set_xlabel("Intensity (m)") + ax.set_ylabel("Mean Damage Ratio (%)") + ax.set_title("Impact Function - AUP flooding") + ax.plot(intensity, mdd * 100) fig.set_size_inches(4.5, 4.5) - #fig.savefig('if_house_aup.png', format='png', bbox_inches='tight') + # fig.savefig('if_house_aup.png', format='png', bbox_inches='tight') return fig + import pandas as pd + def load_accounting(): acc = pd.DataFrame() - acc['Return Period (year)'] = np.array([10, 25, 50, 100]) - acc['frequency (1/year)'] = np.array([1/10, 1/25, 1/50, 1/100]) - acc['intensity (m)'] = np.array([0.7744541, 2.820973, 4.828216, 5.742804]) - acc['Mean Damage Ration (%)'] = np.array([51.83603012, 100, 100, 100]) - acc['impact (USD)'] = np.array([4786.95371, 9234.8, 9234.8, 9234.8]) - acc['frequency * impact'] = np.array([478.695371, 369.392, 184.696, 92.348]) - acc['Expected Annual Impact'] = np.ones(4)*np.nan - acc['Expected Annual Impact'].values[0] = 1125.131371 - #acc_file = 'accounting.xlsx' - #acc_df = pd.read_excel(acc_file) + acc["Return Period (year)"] = np.array([10, 25, 50, 100]) + acc["frequency (1/year)"] = np.array([1 / 10, 1 / 25, 1 / 50, 1 / 100]) + acc["intensity (m)"] = np.array([0.7744541, 2.820973, 4.828216, 5.742804]) + acc["Mean Damage Ration (%)"] = np.array([51.83603012, 100, 100, 100]) + acc["impact (USD)"] = np.array([4786.95371, 9234.8, 9234.8, 9234.8]) + acc["frequency * impact"] = np.array([478.695371, 369.392, 184.696, 92.348]) + acc["Expected Annual Impact"] = np.ones(4) * np.nan + acc["Expected Annual Impact"].values[0] = 1125.131371 + # acc_file = 'accounting.xlsx' + # acc_df = pd.read_excel(acc_file) acc.index += 1 return acc + def generate_plots_risk(): fig_ma = plot_salvador_ma() ent = load_entity() @@ -205,26 +286,26 @@ def generate_plots_risk(): return fig_ma, fig_point, fig_houses, fig_if + def non_linear_growth(cb_acel): - risk_present = 3.562753447707e+06 - risk_future = 7.578426440635e+06 + risk_present = 3.562753447707e06 + risk_future = 7.578426440635e06 - x_var = np.arange(cb_acel.present_year, cb_acel.future_year+1) + x_var = np.arange(cb_acel.present_year, cb_acel.future_year + 1) time_dep = cb_acel._time_dependency_array(0.5) - y_sqr = risk_present + (risk_future-risk_present) * time_dep + y_sqr = risk_present + (risk_future - risk_present) * time_dep time_dep = cb_acel._time_dependency_array(1.0) - y_lin = risk_present + (risk_future-risk_present) * time_dep + y_lin = risk_present + (risk_future - risk_present) * time_dep time_dep = cb_acel._time_dependency_array(2.0) - y_quad = risk_present + (risk_future-risk_present) * time_dep + y_quad = risk_present + (risk_future - risk_present) * time_dep - plt.bar(x_var, y_sqr, color='green', label='sublinear') - plt.bar(x_var, y_lin, color='blue', label='linear') - plt.bar(x_var, y_quad, color='red', 
label='superlinear') - plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) + plt.bar(x_var, y_sqr, color="green", label="sublinear") + plt.bar(x_var, y_lin, color="blue", label="linear") + plt.bar(x_var, y_quad, color="red", label="superlinear") + plt.ticklabel_format(style="sci", axis="y", scilimits=(0, 0)) plt.ylim(3.0e6, 7.8e6) - plt.xlabel('Year') - plt.ylabel('Expected Annual Impact') + plt.xlabel("Year") + plt.ylabel("Expected Annual Impact") plt.legend() - diff --git a/script/jenkins/set_config.py b/script/jenkins/set_config.py index 406eabb5e9..75c4a16959 100644 --- a/script/jenkins/set_config.py +++ b/script/jenkins/set_config.py @@ -1,12 +1,12 @@ -import sys import json +import sys key = sys.argv[1] val = sys.argv[2] -jsonfile = 'climada.conf' +jsonfile = "climada.conf" -with open(jsonfile, encoding='UTF-8') as inf: +with open(jsonfile, encoding="UTF-8") as inf: data = json.load(inf) data[key] = val -with open(jsonfile, 'w', encoding='UTF-8') as outf: +with open(jsonfile, "w", encoding="UTF-8") as outf: json.dump(data, outf) diff --git a/script/jenkins/test_data_api.py b/script/jenkins/test_data_api.py index 42e9103744..38eec4cd30 100644 --- a/script/jenkins/test_data_api.py +++ b/script/jenkins/test_data_api.py @@ -19,31 +19,36 @@ Test files_handler module. """ +import datetime as dt +import unittest from pathlib import Path from sys import dont_write_bytecode -import pandas as pd -import unittest -import xmlrunner -import datetime as dt import numpy as np +import pandas as pd +import xmlrunner from pandas_datareader import wb from climada import CONFIG from climada.entity.exposures.litpop.nightlight import BM_FILENAMES, download_nl_files -from climada.hazard.tc_tracks import IBTRACS_URL, IBTRACS_FILE -from climada.util.finance import WORLD_BANK_WEALTH_ACC, WORLD_BANK_INC_GRP -from climada.util.dwd_icon_loader import (download_icon_grib, - delete_icon_grib, - download_icon_centroids_file) +from climada.hazard.tc_tracks import IBTRACS_FILE, IBTRACS_URL +from climada.util.dwd_icon_loader import ( + delete_icon_grib, + download_icon_centroids_file, + download_icon_grib, +) from climada.util.files_handler import download_file, download_ftp +from climada.util.finance import WORLD_BANK_INC_GRP, WORLD_BANK_WEALTH_ACC + class TestDataAvail(unittest.TestCase): """Test availability of data used through APIs""" def test_noaa_nl_pass(self): """Test NOAA nightlights used in BlackMarble.""" - file_down = download_file(f'{CONFIG.exposures.litpop.nightlights.noaa_url.str()}/F101992.v4.tar') + file_down = download_file( + f"{CONFIG.exposures.litpop.nightlights.noaa_url.str()}/F101992.v4.tar" + ) Path(file_down).unlink() def test_nasa_nl_pass(self): @@ -72,11 +77,11 @@ def test_wb_lev_hist_pass(self): def test_wb_api_pass(self): """Test World Bank API""" - wb.download(indicator='NY.GDP.MKTP.CD', country='CHE', start=1960, end=2030) + wb.download(indicator="NY.GDP.MKTP.CD", country="CHE", start=1960, end=2030) def test_ne_api_pass(self): """Test Natural Earth API""" - url = 'https://naturalearth.s3.amazonaws.com/10m_cultural/ne_10m_admin_0_countries.zip' + url = "https://naturalearth.s3.amazonaws.com/10m_cultural/ne_10m_admin_0_countries.zip" file_down = download_file(url) Path(file_down).unlink() @@ -87,41 +92,41 @@ def test_ibtracs_pass(self): def test_icon_eu_forecast_download(self): """Test availability of DWD icon forecast.""" run_datetime = dt.datetime.utcnow() - dt.timedelta(hours=5) - run_datetime = run_datetime.replace(hour=run_datetime.hour//12*12, - minute=0, - second=0, - 
microsecond=0) - icon_file = download_icon_grib(run_datetime,max_lead_time=1) + run_datetime = run_datetime.replace( + hour=run_datetime.hour // 12 * 12, minute=0, second=0, microsecond=0 + ) + icon_file = download_icon_grib(run_datetime, max_lead_time=1) self.assertEqual(len(icon_file), 1) - delete_icon_grib(run_datetime,max_lead_time=1) #deletes icon_file + delete_icon_grib(run_datetime, max_lead_time=1) # deletes icon_file self.assertFalse(Path(icon_file[0]).exists()) def test_icon_d2_forecast_download(self): """Test availability of DWD icon forecast.""" run_datetime = dt.datetime.utcnow() - dt.timedelta(hours=5) - run_datetime = run_datetime.replace(hour=run_datetime.hour//12*12, - minute=0, - second=0, - microsecond=0) - icon_file = download_icon_grib(run_datetime, - model_name='icon-d2-eps', - max_lead_time=1) + run_datetime = run_datetime.replace( + hour=run_datetime.hour // 12 * 12, minute=0, second=0, microsecond=0 + ) + icon_file = download_icon_grib( + run_datetime, model_name="icon-d2-eps", max_lead_time=1 + ) self.assertEqual(len(icon_file), 1) - delete_icon_grib(run_datetime, - model_name='icon-d2-eps', - max_lead_time=1) #deletes icon_file + delete_icon_grib( + run_datetime, model_name="icon-d2-eps", max_lead_time=1 + ) # deletes icon_file self.assertFalse(Path(icon_file[0]).exists()) def test_icon_centroids_download(self): """Test availablility of DWD icon grid information.""" grid_file = download_icon_centroids_file() Path(grid_file).unlink() - grid_file = download_icon_centroids_file(model_name='icon-d2-eps') + grid_file = download_icon_centroids_file(model_name="icon-d2-eps") Path(grid_file).unlink() + # Execute Tests -if __name__ == '__main__': +if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDataAvail) from sys import argv - outputdir = argv[1] if len(argv) > 1 else str(Path.cwd().joinpath('tests_xml')) + + outputdir = argv[1] if len(argv) > 1 else str(Path.cwd().joinpath("tests_xml")) xmlrunner.XMLTestRunner(output=outputdir).run(TESTS) diff --git a/script/jenkins/test_notebooks.py b/script/jenkins/test_notebooks.py index bb0420194c..f2e4fcdbcc 100644 --- a/script/jenkins/test_notebooks.py +++ b/script/jenkins/test_notebooks.py @@ -6,20 +6,20 @@ import sys import unittest from pathlib import Path + import nbformat import climada +BOUND_TO_FAIL = "# Note: execution of this cell will fail" +"""Cells containing this line will not be executed in the test""" -BOUND_TO_FAIL = '# Note: execution of this cell will fail' -'''Cells containing this line will not be executed in the test''' - -EXCLUDED_FROM_NOTEBOOK_TEST = ['climada_installation_step_by_step.ipynb'] -'''These notebooks are excluded from being tested''' +EXCLUDED_FROM_NOTEBOOK_TEST = ["climada_installation_step_by_step.ipynb"] +"""These notebooks are excluded from being tested""" class NotebookTest(unittest.TestCase): - '''Generic TestCase for testing the executability of notebooks + """Generic TestCase for testing the executability of notebooks Attributes ---------- @@ -28,7 +28,7 @@ class NotebookTest(unittest.TestCase): notebook : str File name of the notebook. - ''' + """ def __init__(self, methodName, wd=None, notebook=None): super(NotebookTest, self).__init__(methodName) @@ -36,64 +36,81 @@ def __init__(self, methodName, wd=None, notebook=None): self.notebook = notebook def test_notebook(self): - '''Extracts code cells from the notebook and executes them one by one, using `exec`. + """Extracts code cells from the notebook and executes them one by one, using `exec`. 
Magic lines and help/? calls are eliminated. Cells containing `BOUND_TO_FAIL` are elided. - Cells doing multiprocessing are elided.''' + Cells doing multiprocessing are elided.""" cwd = Path.cwd() try: # cd to the notebook directory os.chdir(self.wd) - print(f'start testing {self.notebook}') + print(f"start testing {self.notebook}") # read the notebook into a string - with open(self.notebook, encoding='utf8') as nb: + with open(self.notebook, encoding="utf8") as nb: content = nb.read() # parse the string with nbformat.reads - cells = nbformat.reads(content, 4)['cells'] + cells = nbformat.reads(content, 4)["cells"] # create namespace with IPython standards namespace = dict() - exec('from IPython.display import display', namespace) + exec("from IPython.display import display", namespace) # run all cells i = 0 for c in cells: # skip markdown cells - if c['cell_type'] != 'code': continue + if c["cell_type"] != "code": + continue i += 1 # skip deliberately failing cells - if BOUND_TO_FAIL in c['source']: continue + if BOUND_TO_FAIL in c["source"]: + continue # skip multiprocessing cells - if any([ tabu in c['source'].split() for tabu in [ - 'import multiprocessing', - 'from multiprocessing import', - ]]): - print('\n'.join([ - f'\nskip multiprocessing cell {i} in {self.notebook}', - '+'+'-'*68+'+', - c['source'] - ])) + if any( + [ + tabu in c["source"].split() + for tabu in [ + "import multiprocessing", + "from multiprocessing import", + ] + ] + ): + print( + "\n".join( + [ + f"\nskip multiprocessing cell {i} in {self.notebook}", + "+" + "-" * 68 + "+", + c["source"], + ] + ) + ) continue # remove non python lines and help calls which require user input # or involve pools being opened/closed - python_code = "\n".join([ - re.sub(r'pool=\w+', 'pool=None', ln) - for ln in c['source'].split("\n") - if not ln.startswith('%') - and not ln.startswith('help(') - and not ln.startswith('ask_ok(') - and not ln.startswith('ask_ok(') - and not ln.startswith('pool') # by convention Pool objects are called pool - and not ln.strip().endswith('?') - and not re.search(r'(\W|^)Pool\(', ln) # prevent Pool object creation - ]) + python_code = "\n".join( + [ + re.sub(r"pool=\w+", "pool=None", ln) + for ln in c["source"].split("\n") + if not ln.startswith("%") + and not ln.startswith("help(") + and not ln.startswith("ask_ok(") + and not ln.startswith("ask_ok(") + and not ln.startswith( + "pool" + ) # by convention Pool objects are called pool + and not ln.strip().endswith("?") + and not re.search( + r"(\W|^)Pool\(", ln + ) # prevent Pool object creation + ] + ) # execute the python code try: @@ -101,53 +118,60 @@ def test_notebook(self): # report failures except Exception as e: - failure = "\n".join([ - f"notebook {self.notebook} cell {i} failed with {e.__class__}", - f"{e}", - '+'+'-'*68+'+', - c['source'] - ]) - print(f'failed {self.notebook}') + failure = "\n".join( + [ + f"notebook {self.notebook} cell {i} failed with {e.__class__}", + f"{e}", + "+" + "-" * 68 + "+", + c["source"], + ] + ) + print(f"failed {self.notebook}") print(failure) self.fail(failure) - print(f'succeeded {self.notebook}') + print(f"succeeded {self.notebook}") finally: os.chdir(cwd) def main(install_dir): import xmlrunner - + sys.path.append(str(install_dir)) - - notebook_dir = install_dir.joinpath('doc', 'tutorial') - '''The path to the notebook directories.''' + + notebook_dir = install_dir.joinpath("doc", "tutorial") + """The path to the notebook directories.""" # list notebooks in the NOTEBOOK_DIR - notebooks = [f.absolute() - for f in 
sorted(notebook_dir.iterdir()) - if os.path.splitext(f)[1] == ('.ipynb') - and not f.name in EXCLUDED_FROM_NOTEBOOK_TEST] + notebooks = [ + f.absolute() + for f in sorted(notebook_dir.iterdir()) + if os.path.splitext(f)[1] == (".ipynb") + and not f.name in EXCLUDED_FROM_NOTEBOOK_TEST + ] # build a test suite with a test for each notebook suite = unittest.TestSuite() for notebook in notebooks: - class NBTest(NotebookTest): pass + + class NBTest(NotebookTest): + pass + test_name = "_".join(notebook.stem.split()) setattr(NBTest, test_name, NBTest.test_notebook) suite.addTest(NBTest(test_name, notebook.parent, notebook.name)) # run the tests and write xml reports to tests_xml - output_dir = install_dir.joinpath('tests_xml') + output_dir = install_dir.joinpath("tests_xml") xmlrunner.XMLTestRunner(output=str(output_dir)).run(suite) -if __name__ == '__main__': - if sys.argv[1] == 'report': +if __name__ == "__main__": + if sys.argv[1] == "report": install_dir = Path(sys.argv[2]) if len(sys.argv) > 2 else Path.cwd() main(install_dir) - + else: jd, nb = os.path.split(sys.argv[1]) - unittest.TextTestRunner(verbosity=2).run(NotebookTest('test_notebook', jd, nb)) + unittest.TextTestRunner(verbosity=2).run(NotebookTest("test_notebook", jd, nb)) From 48648d956ffe3ce966236bed36cd610f2e18c0f4 Mon Sep 17 00:00:00 2001 From: Emanuel Schmid <51439563+emanuel-schmid@users.noreply.github.com> Date: Mon, 21 Oct 2024 11:05:16 +0200 Subject: [PATCH 2/4] format exceptions (#964) * format * import Impact from climada.engine.impact in order to avoid circular imports * avoid circular imports * pre-commit run --all-files * format exceptions --- climada/engine/test/test_impact_calc.py | 72 +-- climada/entity/impact_funcs/trop_cyclone.py | 509 ++------------------ climada/entity/measures/test/test_base.py | 167 +------ climada/hazard/test/test_tc_tracks.py | 188 ++------ climada/hazard/test/test_tc_tracks_synth.py | 289 +++-------- climada/hazard/test/test_trop_cyclone.py | 108 +---- climada/util/constants.py | 254 +--------- 7 files changed, 233 insertions(+), 1354 deletions(-) diff --git a/climada/engine/test/test_impact_calc.py b/climada/engine/test/test_impact_calc.py index 489f66a00d..3f19e26327 100644 --- a/climada/engine/test/test_impact_calc.py +++ b/climada/engine/test/test_impact_calc.py @@ -244,80 +244,40 @@ def test_calc_impact_RF_pass(self): 0.00000000e00, ] ) + # fmt: off imp_mat_array = np.array( [ [ - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 0.00000000e00, - 6.41965663e04, - 0.00000000e00, - 2.02249434e02, - 3.41245461e04, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 6.41965663e04, 0.00000000e00, 2.02249434e02, + 3.41245461e04, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 3.41245461e04, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 3.41245461e04, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 7.73566566e07, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, 
- 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 7.73566566e07, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], ] ) + # fmt: on check_impact(self, impact, haz, exp, aai_agg, eai_exp, at_event, imp_mat_array) def test_empty_impact(self): diff --git a/climada/entity/impact_funcs/trop_cyclone.py b/climada/entity/impact_funcs/trop_cyclone.py index 18492bbb12..692e0ef92a 100644 --- a/climada/entity/impact_funcs/trop_cyclone.py +++ b/climada/entity/impact_funcs/trop_cyclone.py @@ -311,123 +311,26 @@ def get_countries_per_region(region=None): """ if not region: region = "all" + # fmt: off iso3n = { "NA1": [ - 660, - 28, - 32, - 533, - 44, - 52, - 84, - 60, - 68, - 132, - 136, - 152, - 170, - 188, - 192, - 212, - 214, - 218, - 222, - 238, - 254, - 308, - 312, - 320, - 328, - 332, - 340, - 388, - 474, - 484, - 500, - 558, - 591, - 600, - 604, - 630, - 654, - 659, - 662, - 670, - 534, - 740, - 780, - 796, - 858, - 862, - 92, - 850, + 660, 28, 32, 533, 44, 52, 84, 60, 68, 132, + 136, 152, 170, 188, 192, 212, 214, 218, 222, 238, + 254, 308, 312, 320, 328, 332, 340, 388, 474, 484, + 500, 558, 591, 600, 604, 630, 654, 659, 662, 670, + 534, 740, 780, 796, 858, 862, 92, 850, ], "NA2": [124, 840], "NI": [ - 4, - 51, - 31, - 48, - 50, - 64, - 262, - 232, - 231, - 268, - 356, - 364, - 368, - 376, - 400, - 398, - 414, - 417, - 422, - 462, - 496, - 104, - 524, - 512, - 586, - 634, - 682, - 706, - 144, - 760, - 762, - 795, - 800, - 784, - 860, - 887, + 4, 51, 31, 48, 50, 64, 262, 232, 231, 268, + 356, 364, 368, 376, 400, 398, 414, 417, 422, 462, + 496, 104, 524, 512, 586, 634, 682, 706, 144, 760, + 762, 795, 800, 784, 860, 887, ], "OC": [ - 16, - 36, - 184, - 242, - 258, - 316, - 296, - 584, - 583, - 520, - 540, - 554, - 570, - 574, - 580, - 585, - 598, - 612, - 882, - 90, - 626, - 772, - 776, - 798, - 548, - 876, + 16, 36, 184, 242, 258, 316, 296, 584, 583, 520, + 540, 554, 570, 574, 580, 585, 598, 612, 882, 90, + 626, 772, 776, 798, 548, 876, ], "SI": [174, 180, 748, 450, 454, 466, 480, 508, 710, 834, 716], "WP1": [116, 360, 418, 458, 764, 704], @@ -435,251 +338,42 @@ def get_countries_per_region(region=None): "WP3": [156], "WP4": [344, 392, 410, 446, 158], "ROW": [ - 8, - 12, - 20, - 24, - 10, - 40, - 112, - 56, - 204, - 535, - 70, - 72, - 74, - 76, - 86, - 96, - 100, - 854, - 108, - 120, - 140, - 148, - 162, - 166, - 178, - 191, - 531, - 196, - 203, - 384, - 208, - 818, - 226, - 233, - 234, - 246, - 250, - 260, - 266, - 270, - 276, - 288, - 292, - 300, - 304, - 831, - 324, - 624, - 334, - 336, - 348, - 352, - 372, - 833, - 380, - 832, - 404, - 408, - 983, - 428, - 426, - 430, - 434, - 438, - 440, - 442, - 470, - 478, - 175, - 498, - 492, - 499, - 504, - 516, - 528, - 562, - 566, - 807, - 578, - 275, - 616, - 620, - 642, - 643, - 646, - 638, - 652, - 663, - 666, - 674, - 678, - 686, - 688, - 690, - 694, - 702, - 703, - 705, - 239, - 728, - 724, - 729, - 744, - 752, - 756, - 768, - 788, - 792, - 804, - 826, 
- 581, - 732, - 894, - 248, + 8, 12, 20, 24, 10, 40, 112, 56, 204, 535, + 70, 72, 74, 76, 86, 96, 100, 854, 108, 120, + 140, 148, 162, 166, 178, 191, 531, 196, 203, 384, + 208, 818, 226, 233, 234, 246, 250, 260, 266, 270, + 276, 288, 292, 300, 304, 831, 324, 624, 334, 336, + 348, 352, 372, 833, 380, 832, 404, 408, 983, 428, + 426, 430, 434, 438, 440, 442, 470, 478, 175, 498, + 492, 499, 504, 516, 528, 562, 566, 807, 578, 275, + 616, 620, 642, 643, 646, 638, 652, 663, 666, 674, + 678, 686, 688, 690, 694, 702, 703, 705, 239, 728, + 724, 729, 744, 752, 756, 768, 788, 792, 804, 826, + 581, 732, 894, 248, ], } iso3a = { "NA1": [ - "AIA", - "ATG", - "ARG", - "ABW", - "BHS", - "BRB", - "BLZ", - "BMU", - "BOL", - "CPV", - "CYM", - "CHL", - "COL", - "CRI", - "CUB", - "DMA", - "DOM", - "ECU", - "SLV", - "FLK", - "GUF", - "GRD", - "GLP", - "GTM", - "GUY", - "HTI", - "HND", - "JAM", - "MTQ", - "MEX", - "MSR", - "NIC", - "PAN", - "PRY", - "PER", - "PRI", - "SHN", - "KNA", - "LCA", - "VCT", - "SXM", - "SUR", - "TTO", - "TCA", - "URY", - "VEN", - "VGB", - "VIR", + "AIA", "ATG", "ARG", "ABW", "BHS", "BRB", "BLZ", "BMU", "BOL", "CPV", + "CYM", "CHL", "COL", "CRI", "CUB", "DMA", "DOM", "ECU", "SLV", "FLK", + "GUF", "GRD", "GLP", "GTM", "GUY", "HTI", "HND", "JAM", "MTQ", "MEX", + "MSR", "NIC", "PAN", "PRY", "PER", "PRI", "SHN", "KNA", "LCA", "VCT", + "SXM", "SUR", "TTO", "TCA", "URY", "VEN", "VGB", "VIR", ], "NA2": ["CAN", "USA"], "NI": [ - "AFG", - "ARM", - "AZE", - "BHR", - "BGD", - "BTN", - "DJI", - "ERI", - "ETH", - "GEO", - "IND", - "IRN", - "IRQ", - "ISR", - "JOR", - "KAZ", - "KWT", - "KGZ", - "LBN", - "MDV", - "MNG", - "MMR", - "NPL", - "OMN", - "PAK", - "QAT", - "SAU", - "SOM", - "LKA", - "SYR", - "TJK", - "TKM", - "UGA", - "ARE", - "UZB", - "YEM", + "AFG", "ARM", "AZE", "BHR", "BGD", "BTN", "DJI", "ERI", "ETH", "GEO", + "IND", "IRN", "IRQ", "ISR", "JOR", "KAZ", "KWT", "KGZ", "LBN", "MDV", + "MNG", "MMR", "NPL", "OMN", "PAK", "QAT", "SAU", "SOM", "LKA", "SYR", + "TJK", "TKM", "UGA", "ARE", "UZB", "YEM", ], "OC": [ - "ASM", - "AUS", - "COK", - "FJI", - "PYF", - "GUM", - "KIR", - "MHL", - "FSM", - "NRU", - "NCL", - "NZL", - "NIU", - "NFK", - "MNP", - "PLW", - "PNG", - "PCN", - "WSM", - "SLB", - "TLS", - "TKL", - "TON", - "TUV", - "VUT", - "WLF", + "ASM", "AUS", "COK", "FJI", "PYF", "GUM", "KIR", "MHL", "FSM", "NRU", + "NCL", "NZL", "NIU", "NFK", "MNP", "PLW", "PNG", "PCN", "WSM", "SLB", + "TLS", "TKL", "TON", "TUV", "VUT", "WLF", ], "SI": [ - "COM", - "COD", - "SWZ", - "MDG", - "MWI", - "MLI", - "MUS", - "MOZ", - "ZAF", - "TZA", + "COM", "COD", "SWZ", "MDG", "MWI", "MLI", "MUS", "MOZ", "ZAF", "TZA", "ZWE", ], "WP1": ["KHM", "IDN", "LAO", "MYS", "THA", "VNM"], @@ -687,122 +381,21 @@ def get_countries_per_region(region=None): "WP3": ["CHN"], "WP4": ["HKG", "JPN", "KOR", "MAC", "TWN"], "ROW": [ - "ALB", - "DZA", - "AND", - "AGO", - "ATA", - "AUT", - "BLR", - "BEL", - "BEN", - "BES", - "BIH", - "BWA", - "BVT", - "BRA", - "IOT", - "BRN", - "BGR", - "BFA", - "BDI", - "CMR", - "CAF", - "TCD", - "CXR", - "CCK", - "COG", - "HRV", - "CUW", - "CYP", - "CZE", - "CIV", - "DNK", - "EGY", - "GNQ", - "EST", - "FRO", - "FIN", - "FRA", - "ATF", - "GAB", - "GMB", - "DEU", - "GHA", - "GIB", - "GRC", - "GRL", - "GGY", - "GIN", - "GNB", - "HMD", - "VAT", - "HUN", - "ISL", - "IRL", - "IMN", - "ITA", - "JEY", - "KEN", - "PRK", - "XKX", - "LVA", - "LSO", - "LBR", - "LBY", - "LIE", - "LTU", - "LUX", - "MLT", - "MRT", - "MYT", - "MDA", - "MCO", - "MNE", - "MAR", - "NAM", - "NLD", - "NER", - "NGA", - "MKD", - "NOR", - "PSE", - 
"POL", - "PRT", - "ROU", - "RUS", - "RWA", - "REU", - "BLM", - "MAF", - "SPM", - "SMR", - "STP", - "SEN", - "SRB", - "SYC", - "SLE", - "SGP", - "SVK", - "SVN", - "SGS", - "SSD", - "ESP", - "SDN", - "SJM", - "SWE", - "CHE", - "TGO", - "TUN", - "TUR", - "UKR", - "GBR", - "UMI", - "ESH", - "ZMB", - "ALA", + "ALB", "DZA", "AND", "AGO", "ATA", "AUT", "BLR", "BEL", "BEN", "BES", + "BIH", "BWA", "BVT", "BRA", "IOT", "BRN", "BGR", "BFA", "BDI", "CMR", + "CAF", "TCD", "CXR", "CCK", "COG", "HRV", "CUW", "CYP", "CZE", "CIV", + "DNK", "EGY", "GNQ", "EST", "FRO", "FIN", "FRA", "ATF", "GAB", "GMB", + "DEU", "GHA", "GIB", "GRC", "GRL", "GGY", "GIN", "GNB", "HMD", "VAT", + "HUN", "ISL", "IRL", "IMN", "ITA", "JEY", "KEN", "PRK", "XKX", "LVA", + "LSO", "LBR", "LBY", "LIE", "LTU", "LUX", "MLT", "MRT", "MYT", "MDA", + "MCO", "MNE", "MAR", "NAM", "NLD", "NER", "NGA", "MKD", "NOR", "PSE", + "POL", "PRT", "ROU", "RUS", "RWA", "REU", "BLM", "MAF", "SPM", "SMR", + "STP", "SEN", "SRB", "SYC", "SLE", "SGP", "SVK", "SVN", "SGS", "SSD", + "ESP", "SDN", "SJM", "SWE", "CHE", "TGO", "TUN", "TUR", "UKR", "GBR", + "UMI", "ESH", "ZMB", "ALA", ], } + # fmt: on impf_id = { "NA1": 1, "NA2": 2, diff --git a/climada/entity/measures/test/test_base.py b/climada/entity/measures/test/test_base.py index 520229ffca..4f14f4a5ad 100644 --- a/climada/entity/measures/test/test_base.py +++ b/climada/entity/measures/test/test_base.py @@ -151,84 +151,20 @@ def test_cutoff_hazard_pass(self): new_haz = act_1._cutoff_hazard_damage(exp, imp_set, haz) self.assertFalse(id(new_haz) == id(haz)) - + # fmt: off pos_no_null = np.array( [ - 6249, - 7697, - 9134, - 13500, - 13199, - 5944, - 9052, - 9050, - 2429, - 5139, - 9053, - 7102, - 4096, - 1070, - 5948, - 1076, - 5947, - 7432, - 5949, - 11694, - 5484, - 6246, - 12147, - 778, - 3326, - 7199, - 12498, - 11698, - 6245, - 5327, - 4819, - 8677, - 5970, - 7101, - 779, - 3894, - 9051, - 5976, - 3329, - 5978, - 4282, - 11697, - 7193, - 5351, - 7310, - 7478, - 5489, - 5526, - 7194, - 4283, - 7191, - 5328, - 4812, - 5528, - 5527, - 5488, - 7475, - 5529, - 776, - 5758, - 4811, - 6223, - 7479, - 7470, - 5480, - 5325, - 7477, - 7318, - 7317, - 11696, - 7313, - 13165, - 6221, + 6249, 7697, 9134, 13500, 13199, 5944, 9052, 9050, 2429, 5139, + 9053, 7102, 4096, 1070, 5948, 1076, 5947, 7432, 5949, 11694, + 5484, 6246, 12147, 778, 3326, 7199, 12498, 11698, 6245, 5327, + 4819, 8677, 5970, 7101, 779, 3894, 9051, 5976, 3329, 5978, + 4282, 11697, 7193, 5351, 7310, 7478, 5489, 5526, 7194, 4283, + 7191, 5328, 4812, 5528, 5527, 5488, 7475, 5529, 776, 5758, + 4811, 6223, 7479, 7470, 5480, 5325, 7477, 7318, 7317, 11696, + 7313, 13165, 6221, ] ) + # fmt: on all_haz = np.arange(haz.intensity.shape[0]) all_haz[pos_no_null] = -1 pos_null = np.argwhere(all_haz > 0).reshape(-1) @@ -254,83 +190,20 @@ def test_cutoff_hazard_region_pass(self): self.assertFalse(id(new_haz) == id(haz)) + # fmt: off pos_no_null = np.array( [ - 6249, - 7697, - 9134, - 13500, - 13199, - 5944, - 9052, - 9050, - 2429, - 5139, - 9053, - 7102, - 4096, - 1070, - 5948, - 1076, - 5947, - 7432, - 5949, - 11694, - 5484, - 6246, - 12147, - 778, - 3326, - 7199, - 12498, - 11698, - 6245, - 5327, - 4819, - 8677, - 5970, - 7101, - 779, - 3894, - 9051, - 5976, - 3329, - 5978, - 4282, - 11697, - 7193, - 5351, - 7310, - 7478, - 5489, - 5526, - 7194, - 4283, - 7191, - 5328, - 4812, - 5528, - 5527, - 5488, - 7475, - 5529, - 776, - 5758, - 4811, - 6223, - 7479, - 7470, - 5480, - 5325, - 7477, - 7318, - 7317, - 11696, - 7313, - 13165, - 6221, + 6249, 7697, 9134, 13500, 13199, 
5944, 9052, 9050, 2429, 5139, + 9053, 7102, 4096, 1070, 5948, 1076, 5947, 7432, 5949, 11694, + 5484, 6246, 12147, 778, 3326, 7199, 12498, 11698, 6245, 5327, + 4819, 8677, 5970, 7101, 779, 3894, 9051, 5976, 3329, 5978, + 4282, 11697, 7193, 5351, 7310, 7478, 5489, 5526, 7194, 4283, + 7191, 5328, 4812, 5528, 5527, 5488, 7475, 5529, 776, 5758, + 4811, 6223, 7479, 7470, 5480, 5325, 7477, 7318, 7317, 11696, + 7313, 13165, 6221, ] ) + # fmt: on all_haz = np.arange(haz.intensity.shape[0]) all_haz[pos_no_null] = -1 pos_null = np.argwhere(all_haz > 0).reshape(-1) diff --git a/climada/hazard/test/test_tc_tracks.py b/climada/hazard/test/test_tc_tracks.py index df60bc83ec..c42d5a7a14 100644 --- a/climada/hazard/test/test_tc_tracks.py +++ b/climada/hazard/test/test_tc_tracks.py @@ -815,90 +815,32 @@ def test_interp_track_redundancy_pass(self): def test_interp_origin_pass(self): """Interpolate track to min_time_step crossing lat origin""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK) + # fmt: off tc_track.data[0]["lon"].values = np.array( [ - 167.207761, - 168.1, - 168.936535, - 169.728947, - 170.5, - 171.257176, - 171.946822, - 172.5, - 172.871797, - 173.113396, - 173.3, - 173.496375, - 173.725522, - 174.0, - 174.331591, - 174.728961, - 175.2, - 175.747632, - 176.354929, - 177.0, - 177.66677, - 178.362433, - 179.1, - 179.885288, - -179.304661, - -178.5, - -177.726442, - -176.991938, - -176.3, - -175.653595, - -175.053513, - -174.5, - -173.992511, - -173.527342, - -173.1, - -172.705991, - -172.340823, - -172.0, + 167.207761, 168.1, 168.936535, 169.728947, 170.5, + 171.257176, 171.946822, 172.5, 172.871797, 173.113396, + 173.3, 173.496375, 173.725522, 174.0, 174.331591, + 174.728961, 175.2, 175.747632, 176.354929, 177.0, + 177.66677, 178.362433, 179.1, 179.885288, -179.304661, + -178.5, -177.726442, -176.991938, -176.3, -175.653595, + -175.053513, -174.5, -173.992511, -173.527342, -173.1, + -172.705991, -172.340823, -172.0, ] ) tc_track.data[0]["lat"].values = np.array( [ - 40.196053, - 40.6, - 40.930215, - 41.215674, - 41.5, - 41.816354, - 42.156065, - 42.5, - 42.833998, - 43.16377, - 43.5, - 43.847656, - 44.188854, - 44.5, - 44.764269, - 44.991925, - 45.2, - 45.402675, - 45.602707, - 45.8, - 45.995402, - 46.193543, - 46.4, - 46.615718, - 46.82312, - 47.0, - 47.130616, - 47.225088, - 47.3, - 47.369224, - 47.435786, - 47.5, - 47.562858, - 47.628064, - 47.7, - 47.783047, - 47.881586, - 48.0, + 40.196053, 40.6, 40.930215, 41.215674, 41.5, + 41.816354, 42.156065, 42.5, 42.833998, 43.16377, + 43.5, 43.847656, 44.188854, 44.5, 44.764269, + 44.991925, 45.2, 45.402675, 45.602707, 45.8, + 45.995402, 46.193543, 46.4, 46.615718, 46.82312, + 47.0, 47.130616, 47.225088, 47.3, 47.369224, + 47.435786, 47.5, 47.562858, 47.628064, 47.7, + 47.783047, 47.881586, 48.0, ] ) + # fmt: on tc_track.equal_timestep(time_step_h=1) self.assertEqual(tc_track.data[0]["time"].size, 223) @@ -934,91 +876,33 @@ def test_interp_origin_pass(self): def test_interp_origin_inv_pass(self): """Interpolate track to min_time_step crossing lat origin""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK) + # fmt: off tc_track.data[0]["lon"].values = np.array( [ - 167.207761, - 168.1, - 168.936535, - 169.728947, - 170.5, - 171.257176, - 171.946822, - 172.5, - 172.871797, - 173.113396, - 173.3, - 173.496375, - 173.725522, - 174.0, - 174.331591, - 174.728961, - 175.2, - 175.747632, - 176.354929, - 177.0, - 177.66677, - 178.362433, - 179.1, - 179.885288, - -179.304661, - -178.5, - -177.726442, - -176.991938, - -176.3, - 
-175.653595, - -175.053513, - -174.5, - -173.992511, - -173.527342, - -173.1, - -172.705991, - -172.340823, - -172.0, + 167.207761, 168.1, 168.936535, 169.728947, 170.5, + 171.257176, 171.946822, 172.5, 172.871797, 173.113396, + 173.3, 173.496375, 173.725522, 174.0, 174.331591, + 174.728961, 175.2, 175.747632, 176.354929, 177.0, + 177.66677, 178.362433, 179.1, 179.885288, -179.304661, + -178.5, -177.726442, -176.991938, -176.3, -175.653595, + -175.053513, -174.5, -173.992511, -173.527342, -173.1, + -172.705991, -172.340823, -172.0, ] ) tc_track.data[0]["lon"].values = -tc_track.data[0]["lon"].values tc_track.data[0]["lat"].values = np.array( [ - 40.196053, - 40.6, - 40.930215, - 41.215674, - 41.5, - 41.816354, - 42.156065, - 42.5, - 42.833998, - 43.16377, - 43.5, - 43.847656, - 44.188854, - 44.5, - 44.764269, - 44.991925, - 45.2, - 45.402675, - 45.602707, - 45.8, - 45.995402, - 46.193543, - 46.4, - 46.615718, - 46.82312, - 47.0, - 47.130616, - 47.225088, - 47.3, - 47.369224, - 47.435786, - 47.5, - 47.562858, - 47.628064, - 47.7, - 47.783047, - 47.881586, - 48.0, + 40.196053, 40.6, 40.930215, 41.215674, 41.5, + 41.816354, 42.156065, 42.5, 42.833998, 43.16377, + 43.5, 43.847656, 44.188854, 44.5, 44.764269, + 44.991925, 45.2, 45.402675, 45.602707, 45.8, + 45.995402, 46.193543, 46.4, 46.615718, 46.82312, + 47.0, 47.130616, 47.225088, 47.3, 47.369224, + 47.435786, 47.5, 47.562858, 47.628064, 47.7, + 47.783047, 47.881586, 48.0, ] ) + # fmt: on tc_track.equal_timestep(time_step_h=1) self.assertEqual(tc_track.data[0]["time"].size, 223) diff --git a/climada/hazard/test/test_tc_tracks_synth.py b/climada/hazard/test/test_tc_tracks_synth.py index f0b5c0b441..4f35b05b3a 100644 --- a/climada/hazard/test/test_tc_tracks_synth.py +++ b/climada/hazard/test/test_tc_tracks_synth.py @@ -111,119 +111,53 @@ def test_apply_decay_pass(self): tc_synth._apply_land_decay( tc_track.data, v_rel, p_rel, land_geom, s_rel=True, check_plot=False ) - + # fmt: off p_ref = ( - np.array( - [ - 1.010000000000000, - 1.009000000000000, - 1.008000000000000, - 1.006000000000000, - 1.003000000000000, - 1.002000000000000, - 1.001000000000000, - 1.000000000000000, - 1.000000000000000, - 1.001000000000000, - 1.002000000000000, - 1.005000000000000, - 1.007000000000000, - 1.010000000000000, - 1.010000000000000, - 1.010000000000000, - 1.010000000000000, - 1.010000000000000, - 1.010000000000000, - 1.007000000000000, - 1.004000000000000, - 1.000000000000000, - 0.994000000000000, - 0.981000000000000, - 0.969000000000000, - 0.961000000000000, - 0.947000000000000, - 0.933000000000000, - 0.922000000000000, - 0.930000000000000, - 0.937000000000000, - 0.951000000000000, - 0.947000000000000, - 0.943000000000000, - 0.948000000000000, - 0.946000000000000, - 0.941000000000000, - 0.937000000000000, - 0.955000000000000, - 0.9741457117, - 0.99244068917, - 1.00086729492, - 1.00545853355, - 1.00818354609, - 1.00941850023, - 1.00986192053, - 1.00998400565, - ] - ) + np.array([ + 1.010000000000000, 1.009000000000000, 1.008000000000000, + 1.006000000000000, 1.003000000000000, 1.002000000000000, + 1.001000000000000, 1.000000000000000, 1.000000000000000, + 1.001000000000000, 1.002000000000000, 1.005000000000000, + 1.007000000000000, 1.010000000000000, 1.010000000000000, + 1.010000000000000, 1.010000000000000, 1.010000000000000, + 1.010000000000000, 1.007000000000000, 1.004000000000000, + 1.000000000000000, 0.994000000000000, 0.981000000000000, + 0.969000000000000, 0.961000000000000, 0.947000000000000, + 0.933000000000000, 0.922000000000000, 0.930000000000000, 
+ 0.937000000000000, 0.951000000000000, 0.947000000000000, + 0.943000000000000, 0.948000000000000, 0.946000000000000, + 0.941000000000000, 0.937000000000000, 0.955000000000000, + 0.974145711700000, 0.992440689170000, 1.000867294920000, + 1.005458533550000, 1.008183546090000, 1.009418500230000, + 1.009861920530000, 1.009984005650000, + ]) * 1e3 ) self.assertTrue(np.allclose(p_ref, tc_track.data[0]["central_pressure"].values)) v_ref = ( - np.array( - [ - 0.250000000000000, - 0.300000000000000, - 0.300000000000000, - 0.350000000000000, - 0.350000000000000, - 0.400000000000000, - 0.450000000000000, - 0.450000000000000, - 0.450000000000000, - 0.450000000000000, - 0.450000000000000, - 0.450000000000000, - 0.450000000000000, - 0.400000000000000, - 0.400000000000000, - 0.400000000000000, - 0.400000000000000, - 0.450000000000000, - 0.450000000000000, - 0.500000000000000, - 0.500000000000000, - 0.550000000000000, - 0.650000000000000, - 0.800000000000000, - 0.950000000000000, - 1.100000000000000, - 1.300000000000000, - 1.450000000000000, - 1.500000000000000, - 1.250000000000000, - 1.300000000000000, - 1.150000000000000, - 1.150000000000000, - 1.150000000000000, - 1.150000000000000, - 1.200000000000000, - 1.250000000000000, - 1.250000000000000, - 1.200000000000000, - 0.9737967353, - 0.687255951, - 0.4994850556, - 0.3551480462, - 0.2270548036, - 0.1302099557, - 0.0645385918, - 0.0225325851, - ] - ) + np.array([ + 0.250000000000000, 0.300000000000000, 0.300000000000000, + 0.350000000000000, 0.350000000000000, 0.400000000000000, + 0.450000000000000, 0.450000000000000, 0.450000000000000, + 0.450000000000000, 0.450000000000000, 0.450000000000000, + 0.450000000000000, 0.400000000000000, 0.400000000000000, + 0.400000000000000, 0.400000000000000, 0.450000000000000, + 0.450000000000000, 0.500000000000000, 0.500000000000000, + 0.550000000000000, 0.650000000000000, 0.800000000000000, + 0.950000000000000, 1.100000000000000, 1.300000000000000, + 1.450000000000000, 1.500000000000000, 1.250000000000000, + 1.300000000000000, 1.150000000000000, 1.150000000000000, + 1.150000000000000, 1.150000000000000, 1.200000000000000, + 1.250000000000000, 1.250000000000000, 1.200000000000000, + 0.973796735300000, 0.687255951000000, 0.499485055600000, + 0.355148046200000, 0.227054803600000, 0.130209955700000, + 0.064538591800000, 0.022532585100000, + ]) * 1e2 ) - + # fmt: on self.assertTrue( np.allclose(v_ref, tc_track.data[0]["max_sustained_wind"].values) ) @@ -445,137 +379,42 @@ def test_wrong_decay_pass(self): ) track_gen = track.data[0] + # fmt: off track_gen["lat"] = np.array( [ - 28.20340431, - 28.7915261, - 29.38642458, - 29.97836984, - 30.56844404, - 31.16265292, - 31.74820301, - 32.34449825, - 32.92261894, - 33.47430891, - 34.01492525, - 34.56789399, - 35.08810845, - 35.55965893, - 35.94835174, - 36.29355848, - 36.45379561, - 36.32473812, - 36.07552209, - 35.92224784, - 35.84144186, - 35.78298537, - 35.86090718, - 36.02440372, - 36.37555559, - 37.06207765, - 37.73197352, - 37.97524273, - 38.05560287, - 38.21901208, - 38.31486156, - 38.30813367, - 38.28481808, - 38.28410366, - 38.25894812, - 38.20583372, - 38.22741099, - 38.39970022, - 38.68367797, - 39.08329904, - 39.41434629, - 39.424984, - 39.31327716, - 39.30336335, - 39.31714429, - 39.27031932, - 39.30848775, - 39.48759833, - 39.73326595, - 39.96187967, - 40.26954226, - 40.76882202, - 41.40398607, - 41.93809726, - 42.60395785, - 43.57074792, - 44.63816143, - 45.61450458, - 46.68528511, - 47.89209365, + 28.20340431, 28.7915261, 29.38642458, 29.97836984, 30.56844404, + 
31.16265292, 31.74820301, 32.34449825, 32.92261894, 33.47430891, + 34.01492525, 34.56789399, 35.08810845, 35.55965893, 35.94835174, + 36.29355848, 36.45379561, 36.32473812, 36.07552209, 35.92224784, + 35.84144186, 35.78298537, 35.86090718, 36.02440372, 36.37555559, + 37.06207765, 37.73197352, 37.97524273, 38.05560287, 38.21901208, + 38.31486156, 38.30813367, 38.28481808, 38.28410366, 38.25894812, + 38.20583372, 38.22741099, 38.39970022, 38.68367797, 39.08329904, + 39.41434629, 39.424984, 39.31327716, 39.30336335, 39.31714429, + 39.27031932, 39.30848775, 39.48759833, 39.73326595, 39.96187967, + 40.26954226, 40.76882202, 41.40398607, 41.93809726, 42.60395785, + 43.57074792, 44.63816143, 45.61450458, 46.68528511, 47.89209365, 49.15580502, ] ) track_gen["lon"] = np.array( [ - -79.20514075, - -79.25243311, - -79.28393082, - -79.32324646, - -79.36668585, - -79.41495519, - -79.45198688, - -79.40580325, - -79.34965443, - -79.36938122, - -79.30294825, - -79.06809546, - -78.70281969, - -78.29418936, - -77.82170609, - -77.30034709, - -76.79004969, - -76.37038827, - -75.98641014, - -75.58383356, - -75.18310414, - -74.7974524, - -74.3797645, - -73.86393572, - -73.37910948, - -73.01059003, - -72.77051313, - -72.68011328, - -72.66864779, - -72.62579773, - -72.56307717, - -72.46607618, - -72.35871353, - -72.31120649, - -72.15537583, - -71.75577051, - -71.25287498, - -70.75527907, - -70.34788946, - -70.17518421, - -70.04446577, - -69.76582749, - -69.44372386, - -69.15881376, - -68.84351922, - -68.47890287, - -68.04184565, - -67.53541437, - -66.94008642, - -66.25596075, - -65.53496635, - -64.83491802, - -64.12962685, - -63.54118808, - -62.72934383, - -61.34915091, - -59.72580755, - -58.24404252, - -56.71972992, - -55.0809336, + -79.20514075, -79.25243311, -79.28393082, -79.32324646, -79.36668585, + -79.41495519, -79.45198688, -79.40580325, -79.34965443, -79.36938122, + -79.30294825, -79.06809546, -78.70281969, -78.29418936, -77.82170609, + -77.30034709, -76.79004969, -76.37038827, -75.98641014, -75.58383356, + -75.18310414, -74.7974524, -74.3797645, -73.86393572, -73.37910948, + -73.01059003, -72.77051313, -72.68011328, -72.66864779, -72.62579773, + -72.56307717, -72.46607618, -72.35871353, -72.31120649, -72.15537583, + -71.75577051, -71.25287498, -70.75527907, -70.34788946, -70.17518421, + -70.04446577, -69.76582749, -69.44372386, -69.15881376, -68.84351922, + -68.47890287, -68.04184565, -67.53541437, -66.94008642, -66.25596075, + -65.53496635, -64.83491802, -64.12962685, -63.54118808, -62.72934383, + -61.34915091, -59.72580755, -58.24404252, -56.71972992, -55.0809336, -53.31524758, ] ) - + # fmt: on v_rel = { 1: 0.002249541544102336, -1: 0.00046889526284203036, diff --git a/climada/hazard/test/test_trop_cyclone.py b/climada/hazard/test/test_trop_cyclone.py index 9996becc3a..b04ae3420c 100644 --- a/climada/hazard/test/test_trop_cyclone.py +++ b/climada/hazard/test/test_trop_cyclone.py @@ -192,41 +192,24 @@ def test_cross_antimeridian(self): def test_windfield_models(self): """Test _tc_from_track function with different wind field models.""" intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] + # fmt: off intensity_values = [ ( "H08", None, [ - 22.74903, - 23.784691, - 24.82255, - 22.67403, - 27.218706, - 30.593959, - 18.980878, - 24.540069, - 27.826407, - 26.846293, - 0.0, - 34.568898, + 22.74903, 23.784691, 24.82255, 22.67403, 27.218706, + 30.593959, 18.980878, 24.540069, 27.826407, 26.846293, + 0.0, 34.568898, ], ), ( "H10", None, [ - 24.745521, - 25.596484, - 26.475329, - 24.690914, - 
28.650107, - 31.584395, - 21.723546, - 26.140293, - 28.94964, - 28.051915, - 18.49378, - 35.312152, + 24.745521, 25.596484, 26.475329, 24.690914, 28.650107, + 31.584395, 21.723546, 26.140293, 28.94964, 28.051915, + 18.49378, 35.312152, ], ), # The following model configurations use recorded wind speeds, while the above use @@ -235,94 +218,49 @@ def test_windfield_models(self): "H10", dict(vmax_from_cen=False, rho_air_const=1.2), [ - 23.702232, - 24.327615, - 24.947161, - 23.589233, - 26.616085, - 29.389295, - 21.338178, - 24.257067, - 26.472543, - 25.662313, - 18.535842, - 31.886041, + 23.702232, 24.327615, 24.947161, 23.589233, 26.616085, + 29.389295, 21.338178, 24.257067, 26.472543, 25.662313, + 18.535842, 31.886041, ], ), ( "H10", dict(vmax_from_cen=False, rho_air_const=None), [ - 24.244162, - 24.835561, - 25.432454, - 24.139294, - 27.127457, - 29.719196, - 21.910658, - 24.692637, - 26.783575, - 25.971516, - 19.005555, - 31.904048, + 24.244162, 24.835561, 25.432454, 24.139294, 27.127457, + 29.719196, 21.910658, 24.692637, 26.783575, 25.971516, + 19.005555, 31.904048, ], ), ( "H10", dict(vmax_from_cen=False, rho_air_const=None, vmax_in_brackets=True), [ - 23.592924, - 24.208169, - 24.817104, - 23.483053, - 26.468975, - 29.221715, - 21.260867, - 24.150879, - 26.34288, - 25.543635, - 18.487385, - 31.904048, + 23.592924, 24.208169, 24.817104, 23.483053, 26.468975, + 29.221715, 21.260867, 24.150879, 26.34288, 25.543635, + 18.487385, 31.904048, ], ), ( "H1980", None, [ - 21.376807, - 21.957217, - 22.569568, - 21.284351, - 24.254226, - 26.971303, - 19.220149, - 21.984516, - 24.196388, - 23.449116, - 0, - 31.550207, + 21.376807, 21.957217, 22.569568, 21.284351, 24.254226, + 26.971303, 19.220149, 21.984516, 24.196388, 23.449116, + 0, 31.550207, ], ), ( "ER11", None, [ - 23.565332, - 24.931413, - 26.360758, - 23.490333, - 29.601171, - 34.522795, - 18.996389, - 26.102109, - 30.780737, - 29.498453, - 0, - 38.368805, + 23.565332, 24.931413, 26.360758, 23.490333, 29.601171, + 34.522795, 18.996389, 26.102109, 30.780737, 29.498453, + 0, 38.368805, ], ), ] - + # fmt: on tc_track = TCTracks.from_processed_ibtracs_csv(TEST_TRACK) tc_track.equal_timestep() tc_track.data = tc_track.data[:1] diff --git a/climada/util/constants.py b/climada/util/constants.py index a4e595aaae..90f352218b 100644 --- a/climada/util/constants.py +++ b/climada/util/constants.py @@ -139,239 +139,31 @@ TEST_UNC_OUTPUT_COSTBEN = "test_unc_output_costben" """Demo uncertainty costben output""" +# fmt: off ISIMIP_NATID_TO_ISO = [ - "", - "ABW", - "AFG", - "AGO", - "AIA", - "ALB", - "AND", - "ANT", - "ARE", - "ARG", - "ARM", - "ASM", - "ATG", - "AUS", - "AUT", - "AZE", - "BDI", - "BEL", - "BEN", - "BFA", - "BGD", - "BGR", - "BHR", - "BHS", - "BIH", - "BLR", - "BLZ", - "BMU", - "BOL", - "BRA", - "BRB", - "BRN", - "BTN", - "BWA", - "CAF", - "CAN", - "CHE", - "CHL", - "CHN", - "CIV", - "CMR", - "COD", - "COG", - "COK", - "COL", - "COM", - "CPV", - "CRI", - "CUB", - "CYM", - "CYP", - "CZE", - "DEU", - "DJI", - "DMA", - "DNK", - "DOM", - "DZA", - "ECU", - "EGY", - "ERI", - "ESP", - "EST", - "ETH", - "FIN", - "FJI", - "FLK", - "FRA", - "FRO", - "FSM", - "GAB", - "GBR", - "GEO", - "GGY", - "GHA", - "GIB", - "GIN", - "GLP", - "GMB", - "GNB", - "GNQ", - "GRC", - "GRD", - "GTM", - "GUF", - "GUM", - "GUY", - "HKG", - "HND", - "HRV", - "HTI", - "HUN", - "IDN", - "IMN", - "IND", - "IRL", - "IRN", - "IRQ", - "ISL", - "ISR", - "ITA", - "JAM", - "JEY", - "JOR", - "JPN", - "KAZ", - "KEN", - "KGZ", - "KHM", - "KIR", - "KNA", - "KOR", - "KWT", - "LAO", - 
"LBN", - "LBR", - "LBY", - "LCA", - "LIE", - "LKA", - "LSO", - "LTU", - "LUX", - "LVA", - "MAC", - "MAR", - "MCO", - "MDA", - "MDG", - "MDV", - "MEX", - "MHL", - "MKD", - "MLI", - "MLT", - "MMR", - "MNG", - "MNP", - "MOZ", - "MRT", - "MSR", - "MTQ", - "MUS", - "MWI", - "MYS", - "MYT", - "NAM", - "NCL", - "NER", - "NFK", - "NGA", - "NIC", - "NIU", - "NLD", - "NOR", - "NPL", - "NRU", - "NZL", - "OMN", - "PAK", - "PAN", - "PCN", - "PER", - "PHL", - "PLW", - "PNG", - "POL", - "PRI", - "PRK", - "PRT", - "PRY", - "PSE", - "PYF", - "QAT", - "REU", - "ROU", - "RUS", - "RWA", - "SAU", - "SCG", - "SDN", - "SEN", - "SGP", - "SHN", - "SJM", - "SLB", - "SLE", - "SLV", - "SMR", - "SOM", - "SPM", - "STP", - "SUR", - "SVK", - "SVN", - "SWE", - "SWZ", - "SYC", - "SYR", - "TCA", - "TCD", - "TGO", - "THA", - "TJK", - "TKL", - "TKM", - "TLS", - "TON", - "TTO", - "TUN", - "TUR", - "TUV", - "TWN", - "TZA", - "UGA", - "UKR", - "URY", - "USA", - "UZB", - "VCT", - "VEN", - "VGB", - "VIR", - "VNM", - "VUT", - "WLF", - "WSM", - "YEM", - "ZAF", - "ZMB", - "ZWE", + "", "ABW", "AFG", "AGO", "AIA", "ALB", "AND", "ANT", "ARE", "ARG", "ARM", + "ASM", "ATG", "AUS", "AUT", "AZE", "BDI", "BEL", "BEN", "BFA", "BGD", "BGR", + "BHR", "BHS", "BIH", "BLR", "BLZ", "BMU", "BOL", "BRA", "BRB", "BRN", "BTN", + "BWA", "CAF", "CAN", "CHE", "CHL", "CHN", "CIV", "CMR", "COD", "COG", "COK", + "COL", "COM", "CPV", "CRI", "CUB", "CYM", "CYP", "CZE", "DEU", "DJI", "DMA", + "DNK", "DOM", "DZA", "ECU", "EGY", "ERI", "ESP", "EST", "ETH", "FIN", "FJI", + "FLK", "FRA", "FRO", "FSM", "GAB", "GBR", "GEO", "GGY", "GHA", "GIB", "GIN", + "GLP", "GMB", "GNB", "GNQ", "GRC", "GRD", "GTM", "GUF", "GUM", "GUY", "HKG", + "HND", "HRV", "HTI", "HUN", "IDN", "IMN", "IND", "IRL", "IRN", "IRQ", "ISL", + "ISR", "ITA", "JAM", "JEY", "JOR", "JPN", "KAZ", "KEN", "KGZ", "KHM", "KIR", + "KNA", "KOR", "KWT", "LAO", "LBN", "LBR", "LBY", "LCA", "LIE", "LKA", "LSO", + "LTU", "LUX", "LVA", "MAC", "MAR", "MCO", "MDA", "MDG", "MDV", "MEX", "MHL", + "MKD", "MLI", "MLT", "MMR", "MNG", "MNP", "MOZ", "MRT", "MSR", "MTQ", "MUS", + "MWI", "MYS", "MYT", "NAM", "NCL", "NER", "NFK", "NGA", "NIC", "NIU", "NLD", + "NOR", "NPL", "NRU", "NZL", "OMN", "PAK", "PAN", "PCN", "PER", "PHL", "PLW", + "PNG", "POL", "PRI", "PRK", "PRT", "PRY", "PSE", "PYF", "QAT", "REU", "ROU", + "RUS", "RWA", "SAU", "SCG", "SDN", "SEN", "SGP", "SHN", "SJM", "SLB", "SLE", + "SLV", "SMR", "SOM", "SPM", "STP", "SUR", "SVK", "SVN", "SWE", "SWZ", "SYC", + "SYR", "TCA", "TCD", "TGO", "THA", "TJK", "TKL", "TKM", "TLS", "TON", "TTO", + "TUN", "TUR", "TUV", "TWN", "TZA", "UGA", "UKR", "URY", "USA", "UZB", "VCT", + "VEN", "VGB", "VIR", "VNM", "VUT", "WLF", "WSM", "YEM", "ZAF", "ZMB", "ZWE", ] +# fmt: on """ISO 3166 alpha-3 codes of countries used in ISIMIP_GPWV3_NATID_150AS""" NONISO_REGIONS = [ From 28f8f151b91cf39e110d4556fb0e3f5342121950 Mon Sep 17 00:00:00 2001 From: Emanuel Schmid <51439563+emanuel-schmid@users.noreply.github.com> Date: Mon, 21 Oct 2024 18:41:36 +0200 Subject: [PATCH 3/4] exposures-init-geometry (#890) * wip * climada.entity.Exposures rewrite __init__ * wip * climada.entity.Exposures: refactor __init__ * adaptations to changed Exposures structure: gdf.latitude and gdf.longitude have been eliminated * adaptations to changed Exposures structure: gdf.latitude and gdf.longitude have been eliminated * adaptations to changed Exposures structure: gdf.latitude and gdf.longitude have been eliminated * centroids.Centroids.from_exposures() will always work as geometry column is granted * adaptation to 
eliminated lat/lon columns * hazard.Centroids: region_id and on_land are not necessarily present in the data frame * Exposures() requires geometry or lat/lon * changelog * points_to_raster function getting more complicated, considering dataframes with geometry column instead of lat/lon * pydoc argument description fix * fix typo (bug really) * exposures: remove meta from attributes * introduce exposures properties, region_id etc. consequently use region_id property where applicable * rename and use hazard_impf and hazard_centroids * lint * Exposures: rename pmeta property to _meta * fix changelog * exposures: gracefully handle empty exposures * exposures: linting * Update climada/entity/exposures/base.py Co-authored-by: Samuel Juhel <10011382+spjuhel@users.noreply.github.com> * petals compatibility test: try and switch the petals branch * fix merging mistake * deprecate set_geometry_points * deprecate set_geometry_points * remove properties and methods from the pydoc attributes section * typo * typo * deprecate set_geometry_points * Exposures.__init__: no need for a temporary geodataframe as u_coord.set_df_geometry_points does not use a scheduler anymore * Exposures.base: amend pydocs * Exposures.base: use get_impf_column in hazard_impf * pydoc Exposures.__init__ * pydoc Exposures.__init__ * changelog updates * deprecate set_geometry_points * jenkins: align with petals * exposures tutorial: listing attributes and properties * exposures: don't do anything in check, update Tutorial * format * import Impact from climada.engine.impact in order to avoid circular imports * avoid circular imports * pre-commit run --all-files * setup: DEPS_TEST update * format all * exposures tutorial: add information about optional columns * clean up tutorials --------- Co-authored-by: Samuel Juhel <10011382+spjuhel@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- CHANGELOG.md | 18 + climada/engine/forecast.py | 2 +- climada/engine/impact.py | 8 +- climada/engine/impact_calc.py | 1 + climada/engine/test/test_impact.py | 15 +- climada/engine/test/test_impact_calc.py | 20 +- climada/engine/unsequa/calc_impact.py | 4 +- climada/entity/exposures/base.py | 571 ++++--- climada/entity/exposures/litpop/litpop.py | 58 +- climada/entity/exposures/test/test_base.py | 288 ++-- climada/entity/exposures/test/test_mat.py | 52 +- climada/entity/measures/base.py | 8 +- climada/entity/measures/test/test_base.py | 84 +- climada/hazard/centroids/centr.py | 28 +- climada/hazard/centroids/test/test_centr.py | 10 +- climada/hazard/tc_tracks.py | 4 +- climada/test/test_api_client.py | 4 +- climada/test/test_litpop_integr.py | 58 +- climada/test/test_plot.py | 6 +- climada/util/coordinates.py | 41 +- climada/util/lines_polys_handler.py | 3 +- climada/util/test/test_lines_polys_handler.py | 30 +- doc/tutorial/1_main_climada.ipynb | 6 +- doc/tutorial/climada_engine_Forecast.ipynb | 4 +- doc/tutorial/climada_engine_Impact.ipynb | 10 +- doc/tutorial/climada_entity_Exposures.ipynb | 1463 ++++++++--------- doc/tutorial/climada_entity_LitPop.ipynb | 5 +- doc/tutorial/climada_hazard_TropCyclone.ipynb | 1 - .../eca_san_salvador/San_Salvador_Risk.ipynb | 4 +- .../eca_san_salvador/functions_ss.py | 2 - .../petals_regression_test/Jenkinsfile | 2 +- .../petals_regression_test/run_integ_test.sh | 3 +- 33 files changed, 1365 insertions(+), 1450 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd11ffc89b..a24bea6a24 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -74,6 +74,6 @@ jobs:
     needs: build-and-test
     with:
       core_branch: ${{ github.ref }}
-      petals_branch: develop
+      petals_branch: feature/exposures_crs
     permissions:
       checks: write
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2bf7b460f2..19cb7818a7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,12 +13,26 @@ Code freeze date: YYYY-MM-DD

 ### Added

 - `climada.util.interpolation` module for inter- and extrapolation util functions used in local exceedance intensity and return period functions [#930](https://github.com/CLIMADA-project/climada_python/pull/930)
+- `climada.exposures.exposures.Exposures.geometry` property
+- `climada.exposures.exposures.Exposures.latitude` property
+- `climada.exposures.exposures.Exposures.longitude` property
+- `climada.exposures.exposures.Exposures.value` property
+- `climada.exposures.exposures.Exposures.region_id` property
+- `climada.exposures.exposures.Exposures.category_id` property
+- `climada.exposures.exposures.Exposures.cover` property
+- `climada.exposures.exposures.Exposures.hazard_impf` method
+- `climada.exposures.exposures.Exposures.hazard_centroids` method

 ### Changed

 - Improved scaling factors implemented in `climada.hazard.trop_cyclone.apply_climate_scenario_knu` to model the impact of climate changes to tropical cyclones [#734](https://github.com/CLIMADA-project/climada_python/pull/734)
 - In `climada.util.plot.geo_im_from_array`, NaNs are plotted in gray while cells with no centroid are not plotted [#929](https://github.com/CLIMADA-project/climada_python/pull/929)
 - Renamed `climada.util.plot.subplots_from_gdf` to `climada.util.plot.plot_from_gdf` [#929](https://github.com/CLIMADA-project/climada_python/pull/929)
+- Exposures complete overhaul. Notably:
+  - the _geometry_ column of the inherent `GeoDataFrame` is set up at initialization
+  - the latitude and longitude columns are no longer present there (the corresponding arrays can be retrieved as properties of the Exposures object: `exp.latitude` instead of `exp.gdf.latitude.values`; see the sketch after this list)
+  - `Exposures.gdf` has been renamed to `Exposures.data` (it still works though, as it is a property now pointing to the latter)
+  - the `check` method does not add a default "IMPF_" column to the GeoDataFrame anymore
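For illustration, a minimal sketch of the reworked interface described by the bullets above (added for this document, not part of the patch; the constructor arguments and properties are those introduced in this PR, the coordinates and values are made up):

```python
import numpy as np

from climada.entity import Exposures

# geometry is now built at initialization, either from a "geometry" column
# in `data` or from the `lat`/`lon` arguments
exp = Exposures(
    data={"region_id": np.ones(3), "impf_TC": np.ones(3)},
    lat=[27.5, 28.0, 28.5],
    lon=[-18.0, -17.5, -17.0],
    value=np.arange(3.0),
    crs="EPSG:4326",
)

# coordinates and values are read from properties instead of dataframe columns
lat = exp.latitude   # was: exp.gdf.latitude.values
lon = exp.longitude  # was: exp.gdf.longitude.values
val = exp.value      # was: exp.gdf.value.values

# `gdf` is kept as an alias property for the renamed `data` attribute
assert exp.gdf is exp.data
```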

 ### Fixed

@@ -27,6 +41,10 @@ Code freeze date: YYYY-MM-DD

 ### Deprecated

+- `climada.entity.exposures.Exposures.meta` attribute
+- `climada.entity.exposures.Exposures.set_lat_lon` method
+- `climada.entity.exposures.Exposures.set_geometry_points` method
+
 ### Removed

 ## 5.0.0
diff --git a/climada/engine/forecast.py b/climada/engine/forecast.py
index f123a67ed0..49847cf5ee 100644
--- a/climada/engine/forecast.py
+++ b/climada/engine/forecast.py
@@ -187,7 +187,7 @@ def __init__(
         if exposure_name is None:
             try:
                 self.exposure_name = u_coord.country_to_iso(
-                    exposure.gdf["region_id"].unique()[0], "name"
+                    np.unique(exposure.region_id)[0], "name"
                 )
             except (KeyError, AttributeError):
                 self.exposure_name = "custom"
diff --git a/climada/engine/impact.py b/climada/engine/impact.py
index 58292ab9ce..b38e8c79ca 100644
--- a/climada/engine/impact.py
+++ b/climada/engine/impact.py
@@ -259,10 +259,7 @@ def from_eih(cls, exposures, hazard, at_event, eai_exp, aai_agg, imp_mat=None):
             date=hazard.date,
             frequency=hazard.frequency,
             frequency_unit=hazard.frequency_unit,
-            coord_exp=np.stack(
-                [exposures.gdf["latitude"].values, exposures.gdf["longitude"].values],
-                axis=1,
-            ),
+            coord_exp=np.stack([exposures.latitude, exposures.longitude], axis=1),
             crs=exposures.crs,
             unit=exposures.value_unit,
             tot_value=exposures.centroids_total_value(hazard),
@@ -733,9 +730,6 @@ def plot_raster_eai_exposure(
             cartopy.mpl.geoaxes.GeoAxesSubplot
         """
         eai_exp = self._build_exp()
-        # we need to set geometry points because the `plot_raster` method accesses the
-        # exposures' `gdf.crs` property, which raises an error when geometry is not set
-        eai_exp.set_geometry_points()
         axis = eai_exp.plot_raster(
             res,
             raster_res,
diff --git a/climada/engine/impact_calc.py b/climada/engine/impact_calc.py
index 713cda3242..d344750cf2 100644
--- a/climada/engine/impact_calc.py
+++ b/climada/engine/impact_calc.py
@@ -119,6 +119,7 @@ def impact(
         apply_deductible_to_mat : apply deductible to impact matrix
         apply_cover_to_mat : apply cover to impact matrix
         """
+        # TODO: consider refactoring, making use of Exposures.hazard_impf
         # check for compatibility of exposures and hazard type
         if all(
             name not in self.exposures.gdf.columns
diff --git a/climada/engine/test/test_impact.py b/climada/engine/test/test_impact.py
index 6c901f989a..54e98e3eb4 100644
--- a/climada/engine/test/test_impact.py
+++ b/climada/engine/test/test_impact.py
@@ -112,8 +112,7 @@ def test_from_eih_pass(self):
         np.testing.assert_array_almost_equal(imp.eai_exp, fake_eai_exp)
         np.testing.assert_array_almost_equal(imp.at_event, fake_at_event)
         np.testing.assert_array_almost_equal(
-            imp.coord_exp,
-            np.stack([exp.gdf["latitude"].values, exp.gdf["longitude"].values], axis=1),
+            imp.coord_exp, np.stack([exp.latitude, exp.longitude], axis=1)
         )

     def test_pyproj_crs(self):
@@ -987,9 +986,9 @@ def test__build_exp(self):
         imp = dummy_impact()
         exp = imp._build_exp()
-        np.testing.assert_array_equal(imp.eai_exp, exp.gdf["value"])
-        np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.gdf["latitude"])
-        np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.gdf["longitude"])
+        np.testing.assert_array_equal(imp.eai_exp, exp.value)
+        np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.latitude)
+        np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.longitude)
self.assertTrue(u_coord.equal_crs(exp.crs, imp.crs)) self.assertEqual(exp.value_unit, imp.unit) self.assertEqual(exp.ref_year, 0) @@ -1000,9 +999,9 @@ def test__exp_build_event(self): imp = dummy_impact() event_id = imp.event_id[1] exp = imp._build_exp_event(event_id=event_id) - np.testing.assert_array_equal(imp.imp_mat[1].todense().A1, exp.gdf["value"]) - np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.gdf["latitude"]) - np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.gdf["longitude"]) + np.testing.assert_array_equal(imp.imp_mat[1].todense().A1, exp.value) + np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.latitude) + np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.longitude) self.assertTrue(u_coord.equal_crs(exp.crs, imp.crs)) self.assertEqual(exp.value_unit, imp.unit) self.assertEqual(exp.ref_year, 0) diff --git a/climada/engine/test/test_impact_calc.py b/climada/engine/test/test_impact_calc.py index 3f19e26327..8a004cf670 100644 --- a/climada/engine/test/test_impact_calc.py +++ b/climada/engine/test/test_impact_calc.py @@ -50,8 +50,8 @@ def check_impact(self, imp, haz, exp, aai_agg, eai_exp, at_event, imp_mat_array= """Test properties of imapcts""" self.assertEqual(len(haz.event_id), len(imp.at_event)) self.assertIsInstance(imp, Impact) - np.testing.assert_allclose(imp.coord_exp[:, 0], exp.gdf["latitude"]) - np.testing.assert_allclose(imp.coord_exp[:, 1], exp.gdf["longitude"]) + np.testing.assert_allclose(imp.coord_exp[:, 0], exp.latitude) + np.testing.assert_allclose(imp.coord_exp[:, 1], exp.longitude) self.assertAlmostEqual(imp.aai_agg, aai_agg, 3) np.testing.assert_allclose(imp.eai_exp, eai_exp, rtol=1e-5) np.testing.assert_allclose(imp.at_event, at_event, rtol=1e-5) @@ -490,7 +490,12 @@ def test_minimal_exp_gdf(self): def test_stitch_impact_matrix(self): """Check how sparse matrices from a generator are stitched together""" icalc = ImpactCalc( - Exposures({"blank": [1, 2, 3, 4]}), ImpactFuncSet(), Hazard() + Exposures( + {"blank": [1, 2, 3, 4]}, + geometry=[], + ), + ImpactFuncSet(), + Hazard(), ) icalc.hazard.event_id = np.array([1, 2, 3]) icalc._orig_exp_idx = np.array([0, 1, 2, 3]) @@ -524,7 +529,14 @@ def test_apply_deductible_to_mat(self): def test_stitch_risk_metrics(self): """Test computing risk metrics from an impact matrix generator""" - icalc = ImpactCalc(Exposures({"blank": [1, 2, 3]}), ImpactFuncSet(), Hazard()) + icalc = ImpactCalc( + Exposures( + {"blank": [1, 2, 3]}, + geometry=[], + ), + ImpactFuncSet(), + Hazard(), + ) icalc.hazard.event_id = np.array([1, 2]) icalc.hazard.frequency = np.array([2, 0.5]) icalc._orig_exp_idx = np.array([0, 1, 2]) diff --git a/climada/engine/unsequa/calc_impact.py b/climada/engine/unsequa/calc_impact.py index 061b3e3a2a..93d3fc6584 100644 --- a/climada/engine/unsequa/calc_impact.py +++ b/climada/engine/unsequa/calc_impact.py @@ -240,7 +240,9 @@ def uncertainty( if calc_eai_exp: exp = self.exp_input_var.evaluate() - coord_df = exp.gdf[["latitude", "longitude"]] + coord_df = pd.DataFrame( + dict(latitude=exp.latitude, longitude=exp.longitude) + ) else: coord_df = pd.DataFrame([]) diff --git a/climada/entity/exposures/base.py b/climada/entity/exposures/base.py index 5087a237fb..0c62af1b53 100644 --- a/climada/entity/exposures/base.py +++ b/climada/entity/exposures/base.py @@ -21,6 +21,7 @@ __all__ = ["Exposures", "add_sea", "INDICATOR_IMPF", "INDICATOR_CENTR"] + import copy import logging import warnings @@ -32,7 +33,8 @@ import numpy as np import pandas as pd import rasterio -from geopandas import GeoDataFrame 
+from deprecation import deprecated
+from geopandas import GeoDataFrame, GeoSeries, points_from_xy
 from mpl_toolkits.axes_grid1 import make_axes_locatable
 from rasterio.warp import Resampling

@@ -81,7 +83,7 @@


 class Exposures:
-    """geopandas GeoDataFrame with metada and columns (pd.Series) defined in
+    """geopandas GeoDataFrame with metadata and columns (pd.Series) defined in
     Attributes.

     Attributes
@@ -89,44 +91,19 @@
     description : str
         metadata - description of content and origin of the data
     ref_year : int
-        metada - reference year
+        metadata - reference year
     value_unit : str
-        metada - unit of the exposures values
-    latitude : pd.Series
-        latitude
-    longitude : pd.Series
-        longitude
-    value : pd.Series
-        a value for each exposure
-    impf_SUFFIX : pd.Series, optional
-        e.g. impf_TC. impact functions id for hazard TC.
-        There might be different hazards defined: impf_TC, impf_FL, ...
-        If not provided, set to default ``impf_`` with ids 1 in check().
-    geometry : pd.Series, optional
-        geometry of type Point of each instance.
-        Computed in method set_geometry_points().
-    meta : dict
-        dictionary containing corresponding raster properties (if any):
-        width, height, crs and transform must be present at least (transform needs
-        to contain upper left corner!). Exposures might not contain all the points
-        of the corresponding raster. Not used in internal computations.
-    deductible : pd.Series, optional
-        deductible value for each exposure
-    cover : pd.Series, optional
-        cover value for each exposure
-    category_id : pd.Series, optional
-        category id for each exposure
-    region_id : pd.Series, optional
-        region id for each exposure
-    centr_SUFFIX : pd.Series, optional
-        e.g. centr_TC. centroids index for hazard
-        TC. There might be different hazards defined: centr_TC, centr_FL, ...
-        Computed in method assign_centroids().
+        metadata - unit of the exposures values
+    data : GeoDataFrame
+        containing at least the columns 'geometry' and 'value' for locations and assets;
+        optionally more, among others 'region_id', 'category_id', and columns for
+        (hazard specific) assigned centroids and (hazard specific) impact functions.
""" - _metadata = ["description", "ref_year", "value_unit", "meta"] + _metadata = ["description", "ref_year", "value_unit"] + """List of attributes, which are by default read, e.g., from hdf5""" - vars_oblig = ["value", "latitude", "longitude"] + vars_oblig = ["value", "geometry"] """Name of the variables needed to compute the impact.""" vars_def = [INDICATOR_IMPF, INDICATOR_IMPF_OLD] @@ -145,110 +122,308 @@ class Exposures: @property def crs(self): """Coordinate Reference System, refers to the crs attribute of the inherent GeoDataFrame""" + return self.data.geometry.crs + + @property + def gdf(self): + """Inherent GeoDataFrame""" + return self.data + + @property + def latitude(self): + """Latitude array of exposures""" + return self.data.geometry.y.values + + @property + def longitude(self): + """Longitude array of exposures""" + return self.data.geometry.x.values + + @property + def geometry(self): + """Geometry array of exposures""" + return self.data.geometry.values + + @property + def value(self): + """Geometry array of exposures""" + if "value" in self.data.columns: + return self.data["value"].values + return None + + @property + def region_id(self): + """Region id for each exposure + + Returns + ------- + np.array of int + """ + if "region_id" in self.data.columns: + return self.data["region_id"].values + return None + + @property + def category_id(self): + """Category id for each exposure + + Returns + ------- + np.array + """ + if "category_id" in self.data.columns: + return self.data["category_id"].values + return None + + @property + def cover(self): + """Cover value for each exposures + + Returns + ------- + np.array of float + """ + if "cover" in self.data.columns: + return self.data["cover"].values + return None + + @property + def deductible(self): + """Deductible value for each exposures + + Returns + ------- + np.array of float + """ + if "deductible" in self.data.columns: + return self.data["deductible"].values + return None + + def hazard_impf(self, haz_type=""): + """Get impact functions for a given hazard + + Parameters + ---------- + haz_type : str + hazard type, as in the hazard's.haz_type + which is the HAZ_TYPE constant of the hazard's module + + Returns + ------- + np.array of int + impact functions for the given hazard + """ + col_name = self.get_impf_column(haz_type) + return self.data[col_name].values + + def hazard_centroids(self, haz_type=""): + """Get centroids for a given hazard + + Parameters + ---------- + haz_type : str + hazard type, as in the hazard's.haz_type + which is the HAZ_TYPE constant of the hazard's module + + Returns + ------- + np.array of int + centroids index for the given hazard + """ + if haz_type and INDICATOR_CENTR + haz_type in self.data.columns: + return self.data[INDICATOR_CENTR + haz_type].values + if INDICATOR_CENTR in self.data.columns: + return self.data[INDICATOR_CENTR].values + raise ValueError("Missing hazard centroids.") + + def derive_raster(self): + """Metadata dictionary, containing raster information, derived from the geometry""" + if not self.data.size: + return None + _r, meta = u_coord.points_to_raster(self.data) + return meta + + @staticmethod + def _consolidate( + alternative_data, name, value, default=None, equals=lambda x, y: x == y + ): + """helper function for __init__ for consolidation of arguments. + Most arguments of the __init__ function can be provided either explicitly, by themselves + or as part of the input data. + This method finds the specific argument from these alternative sources. 
+        In case of ambiguity it checks for any discrepancy. In case of missing sources it returns the default.
+
+        Parameters
+        ----------
+        alternative_data:
+            container of general data with named items
+        name:
+            name of the item in the alternative_data
+        value:
+            specific data, could be None
+        default:
+            default value in case both specific and general data don't yield a result
+        equals:
+            the equality function to check for discrepancies
+        """
+        altvalue = alternative_data.get(name)
+        if value is None and altvalue is None:
+            return default
+        if value is None:
+            return altvalue
+        if altvalue is None:
+            return value
         try:
-            return self.gdf.geometry.crs or self.meta.get("crs")
-        except AttributeError:  # i.e., no geometry, crs is assumed to be a property
-            # In case of gdf without geometry, empty or before set_geometry_points was called
-            return self.meta.get("crs")
+            if all(equals(altvalue, value)):
+                return value
+        except TypeError:
+            if equals(altvalue, value):
+                return value
+        raise ValueError(
+            f"conflicting arguments: the given {name}"
+            " is different from its corresponding value(s) in meta or data"
+        )

     def __init__(
         self,
-        *args,
+        data=None,
+        index=None,
+        columns=None,
+        dtype=None,
+        copy=False,  # pylint: disable=redefined-outer-name
+        geometry=None,
+        crs=None,
         meta=None,
         description=None,
-        ref_year=DEF_REF_YEAR,
-        value_unit=DEF_VALUE_UNIT,
-        crs=None,
-        **kwargs,
+        ref_year=None,
+        value_unit=None,
+        value=None,
+        lat=None,
+        lon=None,
     ):
-        """Creates an Exposures object from a GeoDataFrame
-
+        """
         Parameters
         ----------
-        args :
-            Arguments of the GeoDataFrame constructor
-        kwargs :
-            Named arguments of the GeoDataFrame constructor, additionally
+        data : dict, iterable, DataFrame, GeoDataFrame, ndarray
+            data of the initial DataFrame, see ``pandas.DataFrame()``.
+            Used to initialize values for "region_id", "category_id", "cover", "deductible",
+            "value", "geometry", "impf_[hazard type]".
+        columns : Index or array, optional
+            Columns of the initial DataFrame, see ``pandas.DataFrame()``.
+            To be provided if `data` is an array
+        index : Index or array, optional
+            Index of the initial DataFrame, see ``pandas.DataFrame()``.
+            Can optionally be provided if `data` is an array or for defining a specific row index
+        dtype : dtype, optional
+            data type of the initial DataFrame, see ``pandas.DataFrame()``.
+            Can be used to assign specific data types to the columns in `data`
+        copy : bool, optional
+            Whether to make a copy of the input `data`, see ``pandas.DataFrame()``.
+            Default is False, i.e. by default `data` may be altered by the ``Exposures`` object.
+        geometry : array, optional
+            Geometry column, see ``geopandas.GeoDataFrame()``.
+            Must be provided if `lat` and `lon` are None and `data` has no "geometry" column.
+        crs : value, optional
+            Coordinate Reference System, see ``geopandas.GeoDataFrame()``.
         meta : dict, optional
-            Metadata dictionary. Default: {} (empty dictionary)
+            Metadata dictionary. Default: {} (empty dictionary).
+            May be used to provide any of `description`, `ref_year`, `value_unit` and `crs`
         description : str, optional
             Default: None
         ref_year : int, optional
             Reference Year. Defaults to the entry of the same name in `meta` or 2018.
         value_unit : str, optional
             Unit of the exposed value. Defaults to the entry of the same name in `meta` or 'USD'.
-        crs : object, anything accepted by pyproj.CRS.from_user_input
-            Coordinate reference system. Defaults to the entry of the same name in `meta`, or to
-            the CRS of the GeoDataFrame (if provided) or to 'epsg:4326'.
+ value : array, optional + Exposed value column. + Must be provided if `data` has no "value" column + lat : array, optional + Latitude column. + Can be provided together with `lon`, alternative to `geometry` + lon : array, optional + Longitude column. + Can be provided together with `lat`, alternative to `geometry` """ - # meta data - self.meta = {} if meta is None else meta - if not isinstance(self.meta, dict): - raise ValueError("meta must be a dictionary") - self.description = ( - self.meta.get("description") if description is None else description - ) - self.ref_year = ( - self.meta.get("ref_year", DEF_REF_YEAR) if ref_year is None else ref_year - ) - self.value_unit = ( - self.meta.get("value_unit", DEF_VALUE_UNIT) - if value_unit is None - else value_unit + geodata = GeoDataFrame( + data=data, index=index, columns=columns, dtype=dtype, copy=False ) - # remaining generic attributes from derived classes - for mda in type(self)._metadata: - if mda not in Exposures._metadata: - if mda in kwargs: - setattr(self, mda, kwargs.pop(mda)) - elif mda in self.meta: - setattr(self, mda, self.meta[mda]) - else: - setattr(self, mda, None) - - # crs (property) and geometry - data = args[0] if args else kwargs.get("data", {}) - try: - data_crs = data.geometry.crs - except AttributeError: - data_crs = None - if data_crs and data.crs and not u_coord.equal_crs(data_crs, data.crs): - raise ValueError("Inconsistent crs definition in data and data.geometry") - - crs = ( - crs - if crs is not None - else ( - self.meta["crs"] - if "crs" in self.meta - else data_crs if data_crs else None - ) + geometry = self._consolidate(geodata, "geometry", geometry) + value = self._consolidate(geodata, "value", value) + + # both column names are accepted, lat and latitude, respectively lon and longitude. + lat = self._consolidate(geodata, "latitude", lat) + lat = self._consolidate(geodata, "lat", lat) + lon = self._consolidate(geodata, "longitude", lon) + lon = self._consolidate(geodata, "lon", lon) + + # if lat then lon and vice versa: not xor + if (lat is None) ^ (lon is None): + raise ValueError("either provide both, lat and lon, or none of them") + # either geometry or lat/lon + if (lat is None) and (geometry is None): + if geodata.shape[0] == 0: + geodata = geodata.set_geometry([]) + geometry = geodata.geometry + else: + raise ValueError("either provide geometry or lat/lon") + + meta = meta or {} + if not isinstance(meta, dict): + raise TypeError("meta must be of type dict") + + self.description = self._consolidate(meta, "description", description) + self.ref_year = self._consolidate(meta, "ref_year", ref_year, DEF_REF_YEAR) + self.value_unit = self._consolidate( + meta, "value_unit", value_unit, DEF_VALUE_UNIT ) - if "crs" in self.meta and not u_coord.equal_crs(self.meta["crs"], crs): - raise ValueError( - "Inconsistent CRS definition, crs and meta arguments don't match" - ) - if data_crs and not u_coord.equal_crs(data_crs, crs): - raise ValueError( - "Inconsistent CRS definition, data doesn't match meta or crs argument" - ) - if not crs: - crs = DEF_CRS - geometry = kwargs.get("geometry") - if geometry and isinstance(geometry, str): - raise ValueError( + crs = self._consolidate(meta, "crs", crs, equals=u_coord.equal_crs) + + # finalize geometry, set crs + if geometry is None: # -> calculate from lat/lon + geometry = points_from_xy(x=lon, y=lat, crs=crs or DEF_CRS) + elif isinstance(geometry, str): # -> raise exception + raise TypeError( "Exposures is not able to handle customized 'geometry' column names." 
            )
+        elif isinstance(geometry, GeoSeries):  # -> set crs if necessary
+            if crs and not u_coord.equal_crs(crs, geometry.crs):
+                geometry = geometry.set_crs(crs, allow_override=True)
+            if not crs and not geometry.crs:
+                geometry = geometry.set_crs(DEF_CRS)
+        else:  # e.g. a list of Points -> turn into GeoSeries
+            geometry = GeoSeries(geometry, crs=crs or DEF_CRS)
+
+        self.data = GeoDataFrame(
+            data=geodata.loc[
+                :,
+                [
+                    c
+                    for c in geodata.columns
+                    if c not in ["geometry", "latitude", "longitude", "lat", "lon"]
+                ],
+            ],
+            copy=copy,
+            geometry=geometry,
+        )

-        # make the data frame
-        self.set_gdf(GeoDataFrame(*args, **kwargs), crs=crs)
+        # add a 'value' column in case it is not already part of data
+        if value is not None and self.data.get("value") is None:
+            self.data["value"] = value

     def __str__(self):
         return "\n".join(
             [f"{md}: {self.__dict__[md]}" for md in type(self)._metadata]
-            + [f"crs: {self.crs}", "data:", str(self.gdf)]
+            + [
+                f"crs: {self.crs}",
+                f"data: ({self.data.shape[0]} entries)",
+                (
+                    str(self.data)
+                    if self.data.shape[0] < 10
+                    else str(pd.concat([self.data[:4], self.data[-4:]]))
+                ),
+            ]
         )

     def _access_item(self, *args):
@@ -266,8 +441,6 @@
     def check(self):
         """Check Exposures consistency.

         Reports missing columns in log messages.
-        If no ``impf_*`` column is present in the dataframe, a default column ``impf_`` is added
-        with default impact function id 1.
         """
         # mandatory columns
         for var in self.vars_oblig:
@@ -293,8 +466,7 @@ def check(self):
             for col in self.gdf.columns
             if col.startswith(INDICATOR_IMPF) or col.startswith(INDICATOR_IMPF_OLD)
         ]:
-            LOGGER.info("Setting %s to default impact functions ids 1.", INDICATOR_IMPF)
-            self.gdf[INDICATOR_IMPF] = 1
+            LOGGER.warning("There are no impact functions assigned to the exposures")

         # optional columns except centr_*
         for var in sorted(set(self.vars_opt).difference([INDICATOR_CENTR])):
@@ -308,50 +480,16 @@ def check(self):
         elif not any([col.startswith(INDICATOR_CENTR) for col in self.gdf.columns]):
             LOGGER.info("%s not set.", INDICATOR_CENTR)

-        # check if CRS is consistent
-        if self.crs != self.meta.get("crs"):
-            raise ValueError(
-                f"Inconsistent CRS definition, gdf ({self.crs}) attribute doesn't "
-                f"match meta ({self.meta.get('crs')}) attribute."
-            )
-
-        # check whether geometry corresponds to lat/lon
-        try:
-            if (
-                self.gdf.geometry.values[0].x != self.gdf["longitude"].values[0]
-                or self.gdf.geometry.values[0].y != self.gdf["latitude"].values[0]
-            ):
-                raise ValueError(
-                    "Geometry values do not correspond to latitude and"
-                    + " longitude. Use set_geometry_points() or set_lat_lon()."
-                )
-        except AttributeError:  # no geometry column
-            pass
-
-    def set_crs(self, crs=None):
+    def set_crs(self, crs=DEF_CRS):
         """Set the Coordinate Reference System.
         If the epxosures GeoDataFrame has a 'geometry' column it will be updated too.

         Parameters
         ----------
         crs : object, optional
-            anything anything accepted by pyproj.CRS.from_user_input
-            if the original value is None it will be set to the default CRS.
+            anything accepted by pyproj.CRS.from_user_input.
""" - # clear the meta dictionary entry - if "crs" in self.meta: - old_crs = self.meta.pop("crs") - crs = crs if crs else self.crs if self.crs else DEF_CRS - # adjust the dataframe - if "geometry" in self.gdf.columns: - try: - self.gdf.set_crs(crs, inplace=True) - except ValueError: - # restore popped crs and leave - self.meta["crs"] = old_crs - raise - # store the value - self.meta["crs"] = crs + self.data.geometry.set_crs(crs, inplace=True, allow_override=True) def set_gdf(self, gdf: GeoDataFrame, crs=None): """Set the `gdf` GeoDataFrame and update the CRS @@ -367,9 +505,7 @@ def set_gdf(self, gdf: GeoDataFrame, crs=None): if not isinstance(gdf, GeoDataFrame): raise ValueError("gdf is not a GeoDataFrame") # set the dataframe - self.gdf = gdf - # update the coordinate reference system - self.set_crs(crs) + self.data = Exposures(data=gdf, crs=crs).data def get_impf_column(self, haz_type=""): """Find the best matching column name in the exposures dataframe for a given hazard type, @@ -503,30 +639,30 @@ def assign_centroids( ) self.gdf[centr_haz] = assigned_centr + @deprecated( + details="Obsolete method call. As of climada 5.0, geometry points are set during" + " object initialization" + ) def set_geometry_points(self, scheduler=None): - """Set geometry attribute of GeoDataFrame with Points from latitude and - longitude attributes. - - Parameters - ---------- - scheduler : str, optional - used for dask map_partitions. - “threads”, “synchronous” or “processes” - """ - u_coord.set_df_geometry_points(self.gdf, scheduler=scheduler, crs=self.crs) + """obsolete and deprecated since climada 5.0""" + @deprecated( + details="latitude and longitude columns are no longer meaningful in Exposures`" + " GeoDataFrames. They can be retrieved from Exposures.latitude and .longitude" + " properties" + ) def set_lat_lon(self): """Set latitude and longitude attributes from geometry attribute.""" LOGGER.info("Setting latitude and longitude attributes.") - self.gdf["latitude"] = self.gdf.geometry[:].y - self.gdf["longitude"] = self.gdf.geometry[:].x + self.data["latitude"] = self.latitude + self.data["longitude"] = self.longitude + @deprecated( + details="The use of Exposures.set_from_raster is deprecated." + " Use Exposures.from_raster instead." + ) def set_from_raster(self, *args, **kwargs): """This function is deprecated, use Exposures.from_raster instead.""" - LOGGER.warning( - "The use of Exposures.set_from_raster is deprecated." - "Use Exposures.from_raster instead." 
- ) self.__dict__ = Exposures.from_raster(*args, **kwargs).__dict__ @classmethod @@ -658,11 +794,11 @@ def plot_scatter( pos_vals = self.gdf["value"][mask].values > 0 else: pos_vals = np.ones((self.gdf["value"][mask].values.size,), dtype=bool) - value = self.gdf["value"][mask][pos_vals].values + value = self.gdf.value[mask][pos_vals].values coord = np.stack( [ - self.gdf["latitude"][mask][pos_vals].values, - self.gdf["longitude"][mask][pos_vals].values, + self.gdf.geometry[mask][pos_vals].y.values, + self.gdf.geometry[mask][pos_vals].x.values, ], axis=1, ) @@ -747,8 +883,8 @@ def plot_hexbin( value = self.gdf["value"][mask][pos_vals].values coord = np.stack( [ - self.gdf["latitude"][mask][pos_vals].values, - self.gdf["longitude"][mask][pos_vals].values, + self.gdf.geometry[mask][pos_vals].y.values, + self.gdf.geometry[mask][pos_vals].x.values, ], axis=1, ) @@ -820,27 +956,22 @@ def plot_raster( ------- matplotlib.figure.Figure, cartopy.mpl.geoaxes.GeoAxesSubplot """ - if self.meta and self.meta.get("height", 0) * self.meta.get("height", 0) == len( - self.gdf - ): - raster = self.gdf["value"].values.reshape( - (self.meta["height"], self.meta["width"]) - ) - # check raster starts by upper left corner - if self.gdf["latitude"].values[0] < self.gdf["latitude"].values[-1]: - raster = np.flip(raster, axis=0) - if self.gdf["longitude"].values[0] > self.gdf["longitude"].values[-1]: - raise ValueError("Points are not ordered according to meta raster.") - else: - raster, meta = u_coord.points_to_raster( - self.gdf, ["value"], res, raster_res, scheduler - ) - raster = raster.reshape((meta["height"], meta["width"])) + + raster, meta = u_coord.points_to_raster( + points_df=self.data, + val_names=["value"], + res=res, + raster_res=raster_res, + crs=self.crs, + scheduler=None, + ) + raster = raster.reshape((meta["height"], meta["width"])) + # save tiff if save_tiff is not None: with rasterio.open( - save_tiff, - "w", + fp=save_tiff, + mode="w", driver="GTiff", height=meta["height"], width=meta["width"], @@ -856,15 +987,15 @@ def plot_raster( if isinstance(proj_data, ccrs.PlateCarree): # use different projections for plot and data to shift the central lon in the plot xmin, ymin, xmax, ymax = u_coord.latlon_bounds( - self.gdf["latitude"].values, self.gdf["longitude"].values + lat=self.latitude, lon=self.longitude ) proj_plot = ccrs.PlateCarree(central_longitude=0.5 * (xmin + xmax)) else: xmin, ymin, xmax, ymax = ( - self.gdf["longitude"].min(), - self.gdf["latitude"].min(), - self.gdf["longitude"].max(), - self.gdf["latitude"].max(), + self.longitude.min(), + self.latitude.min(), + self.longitude.max(), + self.latitude.max(), ) if not axis: @@ -945,8 +1076,6 @@ def plot_basemap( ------- matplotlib.figure.Figure, cartopy.mpl.geoaxes.GeoAxesSubplot """ - if "geometry" not in self.gdf: - self.set_geometry_points() crs_ori = self.crs self.to_crs(epsg=3857, inplace=True) axis = self.plot_scatter( @@ -988,6 +1117,7 @@ def write_hdf5(self, file_name): var_meta = {} for var in type(self)._metadata: var_meta[var] = getattr(self, var) + var_meta["crs"] = self.crs store.get_storer("exposures").attrs.metadata = var_meta store.close() @@ -1009,8 +1139,9 @@ def from_hdf5(cls, file_name): file_name : str (path and) file name to read from. additional_vars : list - list of additional variable names to read that - are not in exposures.base._metadata + list of additional variable names, other than the attributes of the Exposures class, + whose values are to be read into the Exposures object + class. 
Returns ------- @@ -1118,9 +1249,7 @@ def to_crs(self, crs=None, epsg=None, inplace=False): raise ValueError("one of crs or epsg must be None") if inplace: - self.gdf.to_crs(crs, epsg, True) - self.meta["crs"] = crs or f"EPSG:{epsg}" - self.set_lat_lon() + self.data.to_crs(crs, epsg, True) return None exp = self.copy() @@ -1157,21 +1286,10 @@ def write_raster(self, file_name, value_name="value", scheduler=None): file_name : str name output file in tif format """ - if self.meta and self.meta["height"] * self.meta["width"] == len(self.gdf): - raster = self.gdf[value_name].values.reshape( - (self.meta["height"], self.meta["width"]) - ) - # check raster starts by upper left corner - if self.gdf["latitude"].values[0] < self.gdf["latitude"].values[-1]: - raster = np.flip(raster, axis=0) - if self.gdf["longitude"].values[0] > self.gdf["longitude"].values[-1]: - raise ValueError("Points are not ordered according to meta raster.") - u_coord.write_raster(file_name, raster, self.meta) - else: - raster, meta = u_coord.points_to_raster( - self.gdf, [value_name], scheduler=scheduler - ) - u_coord.write_raster(file_name, raster, meta) + raster, meta = u_coord.points_to_raster( + self.gdf, [value_name], scheduler=scheduler + ) + u_coord.write_raster(file_name, raster, meta) @staticmethod def concat(exposures_list): @@ -1322,10 +1440,10 @@ def add_sea(exposures, sea_res, scheduler=None): sea_res = (sea_res[0] / ONE_LAT_KM, sea_res[1] / ONE_LAT_KM) - min_lat = max(-90, float(exposures.gdf["latitude"].min()) - sea_res[0]) - max_lat = min(90, float(exposures.gdf["latitude"].max()) + sea_res[0]) - min_lon = max(-180, float(exposures.gdf["longitude"].min()) - sea_res[0]) - max_lon = min(180, float(exposures.gdf["longitude"].max()) + sea_res[0]) + min_lat = max(-90, float(exposures.latitude.min()) - sea_res[0]) + max_lat = min(90, float(exposures.latitude.max()) + sea_res[0]) + min_lon = max(-180, float(exposures.longitude.min()) - sea_res[0]) + max_lon = min(180, float(exposures.longitude.max()) + sea_res[0]) lat_arr = np.arange(min_lat, max_lat + sea_res[1], sea_res[1]) lon_arr = np.arange(min_lon, max_lon + sea_res[1], sea_res[1]) @@ -1355,7 +1473,6 @@ def add_sea(exposures, sea_res, scheduler=None): crs=exposures.crs, ref_year=exposures.ref_year, value_unit=exposures.value_unit, - meta=exposures.meta, description=exposures.description, ) diff --git a/climada/entity/exposures/litpop/litpop.py b/climada/entity/exposures/litpop/litpop.py index 372e58533b..f70beea4c4 100644 --- a/climada/entity/exposures/litpop/litpop.py +++ b/climada/entity/exposures/litpop/litpop.py @@ -44,7 +44,7 @@ class LitPop(Exposures): """ - Holds geopandas GeoDataFrame with metada and columns (pd.Series) defined in + Holds geopandas GeoDataFrame with metadata and columns (pd.Series) defined in Attributes of Exposures class. LitPop exposure values are disaggregated proportional to a combination of nightlight intensity (NASA) and Gridded Population data (SEDAC). 
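The next hunk gives `LitPop` its own `__init__`, resolving `exponents`, `fin_mode` and `gpw_version` through `Exposures._consolidate`: each may come from the `meta` dict or be passed explicitly, and conflicting values are rejected. A rough sketch of that resolution logic, added for this document (hypothetical coordinates and values; `LitPop` data would normally come from `from_countries` or `from_shape`):

```python
from climada.entity import LitPop

# metadata may arrive via the `meta` dict ...
lp = LitPop(
    data={"region_id": [756]},
    lat=[45.0],
    lon=[8.0],
    value=[1.0],
    meta={"exponents": (1, 1), "fin_mode": "pc"},
)
assert lp.exponents == (1, 1) and lp.fin_mode == "pc"

# ... or as explicit arguments; if both are given and disagree,
# Exposures._consolidate raises a ValueError
try:
    LitPop(
        data={"region_id": [756]},
        lat=[45.0],
        lon=[8.0],
        value=[1.0],
        exponents=(2, 1),
        meta={"exponents": (1, 1)},
    )
except ValueError as err:
    print(err)  # conflicting arguments: the given exponents ...
```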
@@ -71,6 +71,23 @@ class LitPop(Exposures): _metadata = Exposures._metadata + ["exponents", "fin_mode", "gpw_version"] + def __init__( + self, + *args, + meta=None, + exponents=None, + fin_mode=None, + gpw_version=None, + **kwargs, + ): + super().__init__(*args, meta=meta, **kwargs) + meta = meta or {} + self.exponents = Exposures._consolidate(meta, "exponents", exponents, (1, 1)) + self.fin_mode = Exposures._consolidate(meta, "fin_mode", fin_mode, "pc") + self.gpw_version = Exposures._consolidate( + meta, "gpw_version", gpw_version, GPW_VERSION + ) + def set_countries(self, *args, **kwargs): """This function is deprecated, use LitPop.from_countries instead.""" LOGGER.warning( @@ -235,12 +252,12 @@ def from_countries( try: rows, cols, ras_trans = u_coord.pts_to_raster_meta( ( - exp.gdf["longitude"].min(), - exp.gdf["latitude"].min(), - exp.gdf["longitude"].max(), - exp.gdf["latitude"].max(), + exp.longitude.min(), + exp.latitude.min(), + exp.longitude.max(), + exp.latitude.max(), ), - u_coord.get_resolution(exp.gdf["longitude"], exp.gdf["latitude"]), + u_coord.get_resolution(exp.longitude, exp.latitude), ) exp.meta = { "width": cols, @@ -554,12 +571,12 @@ def from_shape_and_countries( try: rows, cols, ras_trans = u_coord.pts_to_raster_meta( ( - exp.gdf["longitude"].min(), - exp.gdf["latitude"].min(), - exp.gdf["longitude"].max(), - exp.gdf["latitude"].max(), + exp.longitude.min(), + exp.latitude.min(), + exp.longitude.max(), + exp.latitude.max(), ), - u_coord.get_resolution(exp.gdf["longitude"], exp.gdf["latitude"]), + u_coord.get_resolution(exp.longitude, exp.latitude), ) exp.meta = { "width": cols, @@ -688,18 +705,19 @@ def from_shape( ) if ( - min(len(exp.gdf["latitude"].unique()), len(exp.gdf["longitude"].unique())) - > 1 + exp.gdf.shape[0] > 1 + and exp.longitude.max() > exp.longitude.min() + and exp.latitude.max() > exp.latitude.min() ): - # if exp.gdf.shape[0] > 1 and len(exp.gdf.latitude.unique()) > 1: + # if exp.gdf.shape[0] > 1 and len(exp.latitude.unique()) > 1: rows, cols, ras_trans = u_coord.pts_to_raster_meta( ( - exp.gdf["longitude"].min(), - exp.gdf["latitude"].min(), - exp.gdf["longitude"].max(), - exp.gdf["latitude"].max(), + exp.longitude.min(), + exp.latitude.min(), + exp.longitude.max(), + exp.latitude.max(), ), - u_coord.get_resolution(exp.gdf["longitude"], exp.gdf["latitude"]), + u_coord.get_resolution(exp.longitude, exp.latitude), ) exp.meta = { "width": cols, @@ -949,7 +967,7 @@ def _get_litpop_single_polygon( gdf["region_id"] = region_id else: gdf["region_id"] = u_coord.get_country_code( - gdf["latitude"], gdf["longitude"], gridded=True + gdf.geometry.y, gdf.geometry.x, gridded=True ) # remove entries outside polygon with `dropna` and return GeoDataFrame: return gdf.dropna(), meta_out diff --git a/climada/entity/exposures/test/test_base.py b/climada/entity/exposures/test/test_base.py index 6650719a59..7c79f4a22d 100644 --- a/climada/entity/exposures/test/test_base.py +++ b/climada/entity/exposures/test/test_base.py @@ -27,6 +27,7 @@ import rasterio import scipy as sp from rasterio.windows import Window +from shapely.geometry import Point from sklearn.metrics import DistanceMetric import climada.util.coordinates as u_coord @@ -74,20 +75,20 @@ def test_assign_pass(self): ) ncentroids = haz.centroids.size - exp = Exposures(crs=haz.centroids.crs) - - # some are matching exactly, some are geographically close - exp.gdf["longitude"] = np.concatenate( - [ - haz.centroids.lon, - haz.centroids.lon + 0.001 * (-0.5 + np_rand.rand(ncentroids)), - ] - ) - exp.gdf["latitude"] = 
np.concatenate( - [ - haz.centroids.lat, - haz.centroids.lat + 0.001 * (-0.5 + np_rand.rand(ncentroids)), - ] + exp = Exposures( + crs=haz.centroids.crs, + lon=np.concatenate( + [ + haz.centroids.lon, + haz.centroids.lon + 0.001 * (-0.5 + np_rand.rand(ncentroids)), + ] + ), + lat=np.concatenate( + [ + haz.centroids.lat, + haz.centroids.lat + 0.001 * (-0.5 + np_rand.rand(ncentroids)), + ] + ), ) expected_result = np.concatenate([np.arange(ncentroids), np.arange(ncentroids)]) @@ -96,25 +97,21 @@ def test_assign_pass(self): haz.centroids.gdf["lat"] = haz.centroids.lat.astype(test_dtype) haz.centroids.gdf["lon"] = haz.centroids.lon.astype(test_dtype) exp.assign_centroids(haz) - self.assertEqual(exp.gdf.shape[0], len(exp.gdf[INDICATOR_CENTR + "FL"])) - np.testing.assert_array_equal( - exp.gdf[INDICATOR_CENTR + "FL"].values, expected_result - ) + self.assertEqual(exp.gdf.shape[0], len(exp.hazard_centroids("FL"))) + np.testing.assert_array_equal(exp.hazard_centroids("FL"), expected_result) exp.assign_centroids(Hazard(), overwrite=False) - self.assertEqual(exp.gdf.shape[0], len(exp.gdf[INDICATOR_CENTR + "FL"])) - np.testing.assert_array_equal( - exp.gdf[INDICATOR_CENTR + "FL"].values, expected_result - ) + self.assertEqual(exp.gdf.shape[0], len(exp.hazard_centroids("FL"))) + np.testing.assert_array_equal(exp.hazard_centroids("FL"), expected_result) def test__init__meta_type(self): """Check if meta of type list raises a ValueError in __init__""" - with self.assertRaises(ValueError) as cm: - Exposures(meta=[]) - self.assertEqual("meta must be a dictionary", str(cm.exception)) + with self.assertRaises(TypeError) as cm: + Exposures(meta="{}") + self.assertEqual("meta must be of type dict", str(cm.exception)) def test__init__geometry_type(self): """Check that initialization fails when `geometry` is given as a `str` argument""" - with self.assertRaises(ValueError) as cm: + with self.assertRaises(TypeError) as cm: Exposures(geometry="myname") self.assertEqual( "Exposures is not able to handle customized 'geometry' column names.", @@ -135,17 +132,17 @@ def test_read_raster_pass(self): exp.check() self.assertTrue(u_coord.equal_crs(exp.crs, DEF_CRS)) self.assertAlmostEqual( - exp.gdf["latitude"].max(), 10.248220966978932 - 0.009000000000000341 / 2 + exp.latitude.max(), 10.248220966978932 - 0.009000000000000341 / 2 ) self.assertAlmostEqual( - exp.gdf["latitude"].min(), + exp.latitude.min(), 10.248220966978932 - 0.009000000000000341 / 2 - 59 * 0.009000000000000341, ) self.assertAlmostEqual( - exp.gdf["longitude"].min(), -69.2471495969998 + 0.009000000000000341 / 2 + exp.longitude.min(), -69.2471495969998 + 0.009000000000000341 / 2 ) self.assertAlmostEqual( - exp.gdf["longitude"].max(), + exp.longitude.max(), -69.2471495969998 + 0.009000000000000341 / 2 + 49 * 0.009000000000000341, ) self.assertEqual(len(exp.gdf), 60 * 50) @@ -166,9 +163,9 @@ def test_assign_raster_pass(self): haz = Hazard("FL", centroids=Centroids.from_meta(meta)) # explicit points with known results (see `expected_result` for details) - exp = Exposures(crs=DEF_CRS) - exp.gdf["longitude"] = np.array( - [ + exp = Exposures( + crs=DEF_CRS, + lon=[ -20.1, -20.0, -19.8, @@ -190,10 +187,8 @@ def test_assign_raster_pass(self): -6.4, 9.8, 0.0, - ] - ) - exp.gdf["latitude"] = np.array( - [ + ], + lat=[ 7.3, 7.3, 7.3, @@ -215,7 +210,7 @@ def test_assign_raster_pass(self): -1.9, -1.7, 0.0, - ] + ], ) exp.assign_centroids(haz) @@ -269,8 +264,8 @@ def test_assign_raster_same_pass(self): def test_assign_large_hazard_subset_pass(self): """Test 
assign_centroids with raster hazard""" exp = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60)) - exp.gdf["latitude"][[0, 1]] = exp.gdf["latitude"][[1, 0]] - exp.gdf["longitude"][[0, 1]] = exp.gdf["longitude"][[1, 0]] + exp.latitude[[0, 1]] = exp.latitude[[1, 0]] + exp.longitude[[0, 1]] = exp.longitude[[1, 0]] exp.check() haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type="FL") exp.assign_centroids(haz) @@ -278,10 +273,10 @@ def test_assign_large_hazard_subset_pass(self): sel_cen=exp.gdf[INDICATOR_CENTR + "FL"].values ) np.testing.assert_array_equal( - np.unique(assigned_centroids.lat), np.unique(exp.gdf["latitude"]) + np.unique(assigned_centroids.lat), np.unique(exp.latitude) ) np.testing.assert_array_equal( - np.unique(assigned_centroids.lon), np.unique(exp.gdf["longitude"]) + np.unique(assigned_centroids.lon), np.unique(exp.longitude) ) def test_affected_total_value(self): @@ -340,60 +335,34 @@ class TestChecker(unittest.TestCase): def test_error_logs_fail(self): """Wrong exposures definition""" expo = good_exposures() - expo.gdf.drop(["longitude"], inplace=True, axis=1) + expo.gdf.drop(["value"], inplace=True, axis=1) with self.assertRaises(ValueError) as cm: expo.check() - self.assertIn("longitude missing", str(cm.exception)) + self.assertIn("value missing", str(cm.exception)) def test_error_logs_wrong_crs(self): """Ambiguous crs definition""" - expo = good_exposures() - expo.set_geometry_points() # sets crs to 4326 + expo = good_exposures() # epsg:4326 # all good _expo = Exposures(expo.gdf, meta={"crs": 4326}, crs=DEF_CRS) + self.assertEqual(expo.crs, _expo.crs) - with self.assertRaises(ValueError) as cm: - _expo = Exposures(expo.gdf, meta={"crs": 4230}, crs=4326) - self.assertIn( - "Inconsistent CRS definition, crs and meta arguments don't match", - str(cm.exception), - ) - - with self.assertRaises(ValueError) as cm: - _expo = Exposures(expo.gdf, meta={"crs": 4230}) - self.assertIn( - "Inconsistent CRS definition, data doesn't match meta or crs argument", - str(cm.exception), - ) + # still good: crs in argument and meta override crs from data frame + _expo = Exposures(expo.gdf, meta={"crs": 4230}) + self.assertNotEqual(expo.crs, _expo.crs) - with self.assertRaises(ValueError) as cm: - _expo = Exposures(expo.gdf, crs="epsg:4230") - self.assertIn( - "Inconsistent CRS definition, data doesn't match meta or crs argument", - str(cm.exception), - ) + _expo = Exposures(expo.gdf, crs="epsg:4230") + self.assertTrue(u_coord.equal_crs(_expo.crs, 4230)) - _expo = Exposures(expo.gdf) - _expo.meta["crs"] = "epsg:4230" + # bad: direct and indirect (meta) argument conflict with self.assertRaises(ValueError) as cm: - _expo.check() + _expo = Exposures(expo.gdf, meta={"crs": 4230}, crs=4326) self.assertIn( - "Inconsistent CRS definition, gdf (EPSG:4326) attribute doesn't match " - "meta (epsg:4230) attribute.", - str(cm.exception), + "conflicting arguments: the given crs is different", str(cm.exception) ) - def test_error_geometry_fail(self): - """Wrong exposures definition""" - expo = good_exposures() - expo.set_geometry_points() - expo.gdf["latitude"].values[0] = 5 - - with self.assertRaises(ValueError): - expo.check() - class TestIO(unittest.TestCase): """Check constructor Exposures through DataFrames readers""" @@ -410,7 +379,6 @@ def test_read_template_pass(self): def test_io_hdf5_pass(self): """write and read hdf5""" exp_df = Exposures(pd.read_excel(ENT_TEMPLATE_XLS), crs="epsg:32632") - exp_df.set_geometry_points() exp_df.check() # set metadata exp_df.ref_year = 2020 @@ -430,49 
+398,40 @@ def test_io_hdf5_pass(self): self.assertEqual(exp_df.ref_year, exp_read.ref_year) self.assertEqual(exp_df.value_unit, exp_read.value_unit) - self.assertDictEqual(exp_df.meta, exp_read.meta) - self.assertTrue(u_coord.equal_crs(exp_df.crs, exp_read.crs)) - self.assertTrue(u_coord.equal_crs(exp_df.gdf.crs, exp_read.gdf.crs)) self.assertEqual(exp_df.description, exp_read.description) + np.testing.assert_array_equal(exp_df.latitude, exp_read.latitude) + np.testing.assert_array_equal(exp_df.longitude, exp_read.longitude) + np.testing.assert_array_equal(exp_df.value, exp_read.value) np.testing.assert_array_equal( - exp_df.gdf["latitude"].values, exp_read.gdf["latitude"].values + exp_df.data["deductible"].values, exp_read.data["deductible"].values ) np.testing.assert_array_equal( - exp_df.gdf["longitude"].values, exp_read.gdf["longitude"].values + exp_df.data["cover"].values, exp_read.data["cover"].values ) np.testing.assert_array_equal( - exp_df.gdf["value"].values, exp_read.gdf["value"].values + exp_df.data["region_id"].values, exp_read.data["region_id"].values ) np.testing.assert_array_equal( - exp_df.gdf["deductible"].values, exp_read.gdf["deductible"].values + exp_df.data["category_id"].values, exp_read.data["category_id"].values ) np.testing.assert_array_equal( - exp_df.gdf["cover"].values, exp_read.gdf["cover"].values + exp_df.data["impf_TC"].values, exp_read.data["impf_TC"].values ) np.testing.assert_array_equal( - exp_df.gdf["region_id"].values, exp_read.gdf["region_id"].values + exp_df.data["centr_TC"].values, exp_read.data["centr_TC"].values ) np.testing.assert_array_equal( - exp_df.gdf["category_id"].values, exp_read.gdf["category_id"].values + exp_df.data["impf_FL"].values, exp_read.data["impf_FL"].values ) np.testing.assert_array_equal( - exp_df.gdf["impf_TC"].values, exp_read.gdf["impf_TC"].values - ) - np.testing.assert_array_equal( - exp_df.gdf["centr_TC"].values, exp_read.gdf["centr_TC"].values - ) - np.testing.assert_array_equal( - exp_df.gdf["impf_FL"].values, exp_read.gdf["impf_FL"].values - ) - np.testing.assert_array_equal( - exp_df.gdf["centr_FL"].values, exp_read.gdf["centr_FL"].values + exp_df.data["centr_FL"].values, exp_read.data["centr_FL"].values ) - for point_df, point_read in zip( - exp_df.gdf.geometry.values, exp_read.gdf.geometry.values - ): - self.assertEqual(point_df.x, point_read.x) - self.assertEqual(point_df.y, point_read.y) + self.assertTrue( + u_coord.equal_crs(exp_df.crs, exp_read.crs), + f"{exp_df.crs} and {exp_read.crs} are different", + ) + self.assertTrue(u_coord.equal_crs(exp_df.gdf.crs, exp_read.gdf.crs)) class TestAddSea(unittest.TestCase): @@ -480,16 +439,20 @@ class TestAddSea(unittest.TestCase): def test_add_sea_pass(self): """Test add_sea function with fake data.""" - exp = Exposures() - exp.gdf["value"] = np.arange(0, 1.0e6, 1.0e5) min_lat, max_lat = 27.5, 30 min_lon, max_lon = -18, -12 - exp.gdf["latitude"] = np.linspace(min_lat, max_lat, 10) - exp.gdf["longitude"] = np.linspace(min_lon, max_lon, 10) - exp.gdf["region_id"] = np.ones(10) - exp.gdf["impf_TC"] = np.ones(10) - exp.ref_year = 2015 - exp.value_unit = "XSD" + + exp = Exposures( + data=dict( + value=np.arange(0, 1.0e6, 1.0e5), + latitude=np.linspace(min_lat, max_lat, 10), + longitude=np.linspace(min_lon, max_lon, 10), + region_id=np.ones(10), + impf_TC=np.ones(10), + ), + ref_year=2015, + value_unit="XSD", + ) exp.check() sea_coast = 100 @@ -505,16 +468,14 @@ def test_add_sea_pass(self): max_lat = max_lat + sea_coast min_lon = min_lon - sea_coast max_lon = max_lon + 
sea_coast - self.assertEqual(np.min(exp_sea.gdf["latitude"]), min_lat) - self.assertEqual(np.min(exp_sea.gdf["longitude"]), min_lon) - np.testing.assert_array_equal( - exp_sea.gdf.value.values[:10], np.arange(0, 1.0e6, 1.0e5) - ) + self.assertEqual(np.min(exp_sea.latitude), min_lat) + self.assertEqual(np.min(exp_sea.longitude), min_lon) + np.testing.assert_array_equal(exp_sea.value[:10], np.arange(0, 1.0e6, 1.0e5)) self.assertEqual(exp_sea.ref_year, exp.ref_year) self.assertEqual(exp_sea.value_unit, exp.value_unit) - on_sea_lat = exp_sea.gdf["latitude"].values[11:] - on_sea_lon = exp_sea.gdf["longitude"].values[11:] + on_sea_lat = exp_sea.latitude[11:] + on_sea_lon = exp_sea.longitude[11:] res_on_sea = u_coord.coord_on_land(on_sea_lat, on_sea_lon) res_on_sea = ~res_on_sea self.assertTrue(np.all(res_on_sea)) @@ -523,14 +484,8 @@ def test_add_sea_pass(self): self.assertAlmostEqual( dist.pairwise( [ - [ - exp_sea.gdf["longitude"].values[-1], - exp_sea.gdf["latitude"].values[-1], - ], - [ - exp_sea.gdf["longitude"].values[-2], - exp_sea.gdf["latitude"].values[-2], - ], + [exp_sea.longitude[-1], exp_sea.latitude[-1]], + [exp_sea.longitude[-2], exp_sea.latitude[-2]], ] )[0][1], sea_res_km, @@ -541,16 +496,20 @@ class TestConcat(unittest.TestCase): """Check constructor Exposures through DataFrames readers""" def setUp(self): - exp = Exposures(crs="epsg:3395") - exp.gdf["value"] = np.arange(0, 1.0e6, 1.0e5) min_lat, max_lat = 27.5, 30 min_lon, max_lon = -18, -12 - exp.gdf["latitude"] = np.linspace(min_lat, max_lat, 10) - exp.gdf["longitude"] = np.linspace(min_lon, max_lon, 10) - exp.gdf["region_id"] = np.ones(10) - exp.gdf["impf_TC"] = np.ones(10) - exp.ref_year = 2015 - exp.value_unit = "XSD" + exp = Exposures( + crs="epsg:3395", + value=np.arange(0, 1.0e6, 1.0e5), + lat=np.linspace(min_lat, max_lat, 10), + lon=np.linspace(min_lon, max_lon, 10), + ref_year=2015, + value_unit="XSD", + data=dict( + region_id=np.ones(10), + impf_TC=np.ones(10), + ), + ) self.dummy = exp def test_concat_pass(self): @@ -566,8 +525,11 @@ def test_concat_pass(self): self.dummy, ] ) - self.assertEqual(self.dummy.gdf.shape, (10, 5)) - self.assertEqual(catexp.gdf.shape, (40, 5)) + self.assertEqual( + list(self.dummy.gdf.columns), ["region_id", "impf_TC", "geometry", "value"] + ) + self.assertEqual(self.dummy.gdf.shape, (10, 4)) + self.assertEqual(catexp.gdf.shape, (40, 4)) self.assertTrue(u_coord.equal_crs(catexp.crs, "epsg:3395")) def test_concat_fail(self): @@ -592,17 +554,12 @@ def test_copy_pass(self): self.assertEqual(exp_copy.ref_year, exp.ref_year) self.assertEqual(exp_copy.value_unit, exp.value_unit) self.assertEqual(exp_copy.description, exp.description) - np.testing.assert_array_equal( - exp_copy.gdf["latitude"].values, exp.gdf["latitude"].values - ) - np.testing.assert_array_equal( - exp_copy.gdf["longitude"].values, exp.gdf["longitude"].values - ) + np.testing.assert_array_equal(exp_copy.latitude, exp.latitude) + np.testing.assert_array_equal(exp_copy.longitude, exp.longitude) def test_to_crs_inplace_pass(self): """Test to_crs function inplace.""" exp = good_exposures() - exp.set_geometry_points() exp.check() exp.to_crs("epsg:3395", inplace=True) self.assertIsInstance(exp, Exposures) @@ -614,7 +571,6 @@ def test_to_crs_inplace_pass(self): def test_to_crs_pass(self): """Test to_crs function copy.""" exp = good_exposures() - exp.set_geometry_points() exp.check() exp_tr = exp.to_crs("epsg:3395") self.assertIsInstance(exp, Exposures) @@ -626,12 +582,13 @@ def test_to_crs_pass(self): def test_constructor_pass(self): 
"""Test initialization with input GeoDataFrame""" - in_gpd = gpd.GeoDataFrame() - in_gpd["value"] = np.zeros(10) - in_gpd.ref_year = 2015 + in_gpd = gpd.GeoDataFrame( + dict(latitude=range(10), longitude=[0] * 10, value=np.zeros(10)) + ) in_exp = Exposures(in_gpd, ref_year=2015) self.assertEqual(in_exp.ref_year, 2015) - np.testing.assert_array_equal(in_exp.gdf["value"], np.zeros(10)) + np.testing.assert_array_equal(in_exp.value, np.zeros(10)) + self.assertEqual(in_exp.gdf.geometry[0], Point(0, 0)) def test_error_on_access_item(self): """Test error output when trying to access items as in CLIMADA 1.x""" @@ -647,33 +604,34 @@ def test_set_gdf(self): gdf_without_geometry = good_exposures().gdf good_exp = good_exposures() good_exp.set_crs(crs="epsg:3395") - good_exp.set_geometry_points() gdf_with_geometry = good_exp.gdf probe = Exposures() self.assertRaises(ValueError, probe.set_gdf, pd.DataFrame()) probe.set_gdf(empty_gdf) - self.assertTrue(probe.gdf.equals(gpd.GeoDataFrame())) + self.assertTrue(probe.gdf.equals(gpd.GeoDataFrame().set_geometry([]))) self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.crs)) - self.assertFalse(hasattr(probe.gdf, "crs")) + self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.gdf.crs)) probe.set_gdf(gdf_with_geometry) self.assertTrue(probe.gdf.equals(gdf_with_geometry)) - self.assertTrue(u_coord.equal_crs("epsg:3395", probe.crs)) + self.assertTrue(u_coord.equal_crs("epsg:3395", gdf_with_geometry.crs)) + self.assertTrue( + u_coord.equal_crs("epsg:3395", probe.crs), f"unexpected: {probe.crs}" + ) self.assertTrue(u_coord.equal_crs("epsg:3395", probe.gdf.crs)) probe.set_gdf(gdf_without_geometry) self.assertTrue(probe.gdf.equals(good_exposures().gdf)) self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.crs)) - self.assertFalse(hasattr(probe.gdf, "crs")) + self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.gdf.crs)) def test_set_crs(self): """Test setting the CRS""" empty_gdf = gpd.GeoDataFrame() gdf_without_geometry = good_exposures().gdf good_exp = good_exposures() - good_exp.set_geometry_points() gdf_with_geometry = good_exp.gdf probe = Exposures(gdf_without_geometry) @@ -685,8 +643,8 @@ def test_set_crs(self): self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.crs)) probe.set_crs(DEF_CRS) self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.crs)) - self.assertRaises(ValueError, probe.set_crs, "epsg:3395") - self.assertTrue(u_coord.equal_crs("EPSG:4326", probe.meta.get("crs"))) + probe.set_crs("epsg:3395") + self.assertTrue(u_coord.equal_crs("epsg:3395", probe.crs)) def test_to_crs_epsg_crs(self): """Check that if crs and epsg are both provided a ValueError is raised""" @@ -708,30 +666,28 @@ def test_get_impf_column(self): self.assertRaises(ValueError, expo.get_impf_column, "HAZ") # removed impf column - expo.gdf.drop(columns="impf_NA", inplace=True) + expo.data.drop(columns="impf_NA", inplace=True) self.assertRaises(ValueError, expo.get_impf_column, "NA") self.assertRaises(ValueError, expo.get_impf_column) # default (anonymous) impf column - expo.check() + expo.data["impf_"] = 1 self.assertEqual("impf_", expo.get_impf_column()) self.assertEqual("impf_", expo.get_impf_column("HAZ")) # rename impf column to old style column name - expo.gdf.rename(columns={"impf_": "if_"}, inplace=True) - expo.check() + expo.data.rename(columns={"impf_": "if_"}, inplace=True) self.assertEqual("if_", expo.get_impf_column()) self.assertEqual("if_", expo.get_impf_column("HAZ")) # rename impf column to old style column name - expo.gdf.rename(columns={"if_": "if_NA"}, inplace=True) - expo.check() + 
expo.data.rename(columns={"if_": "if_NA"}, inplace=True) self.assertEqual("if_NA", expo.get_impf_column("NA")) self.assertRaises(ValueError, expo.get_impf_column) self.assertRaises(ValueError, expo.get_impf_column, "HAZ") # add anonymous impf column - expo.gdf["impf_"] = expo.gdf["region_id"] + expo.data["impf_"] = expo.region_id self.assertEqual("if_NA", expo.get_impf_column("NA")) self.assertEqual("impf_", expo.get_impf_column()) self.assertEqual("impf_", expo.get_impf_column("HAZ")) diff --git a/climada/entity/exposures/test/test_mat.py b/climada/entity/exposures/test/test_mat.py index 540b92c880..4993b3cde7 100644 --- a/climada/entity/exposures/test/test_mat.py +++ b/climada/entity/exposures/test/test_mat.py @@ -43,40 +43,40 @@ def test_read_demo_pass(self): self.assertEqual(expo.gdf.index[0], 0) self.assertEqual(expo.gdf.index[n_expos - 1], n_expos - 1) - self.assertEqual(expo.gdf["value"].shape, (n_expos,)) - self.assertEqual(expo.gdf["value"][0], 13927504367.680632) - self.assertEqual(expo.gdf["value"][n_expos - 1], 12624818493.687229) + self.assertEqual(expo.value.shape, (n_expos,)) + self.assertEqual(expo.value[0], 13927504367.680632) + self.assertEqual(expo.value[n_expos - 1], 12624818493.687229) - self.assertEqual(expo.gdf["deductible"].shape, (n_expos,)) - self.assertEqual(expo.gdf["deductible"][0], 0) - self.assertEqual(expo.gdf["deductible"][n_expos - 1], 0) + self.assertEqual(expo.deductible.shape, (n_expos,)) + self.assertEqual(expo.deductible[0], 0) + self.assertEqual(expo.deductible[n_expos - 1], 0) - self.assertEqual(expo.gdf["cover"].shape, (n_expos,)) - self.assertEqual(expo.gdf["cover"][0], 13927504367.680632) - self.assertEqual(expo.gdf["cover"][n_expos - 1], 12624818493.687229) + self.assertEqual(expo.cover.shape, (n_expos,)) + self.assertEqual(expo.cover[0], 13927504367.680632) + self.assertEqual(expo.cover[n_expos - 1], 12624818493.687229) - self.assertIn("int", str(expo.gdf["impf_"].dtype)) - self.assertEqual(expo.gdf["impf_"].shape, (n_expos,)) - self.assertEqual(expo.gdf["impf_"][0], 1) - self.assertEqual(expo.gdf["impf_"][n_expos - 1], 1) + self.assertIn("int", str(expo.hazard_impf().dtype)) + self.assertEqual(expo.hazard_impf().shape, (n_expos,)) + self.assertEqual(expo.hazard_impf()[0], 1) + self.assertEqual(expo.hazard_impf()[n_expos - 1], 1) - self.assertIn("int", str(expo.gdf["category_id"].dtype)) - self.assertEqual(expo.gdf["category_id"].shape, (n_expos,)) - self.assertEqual(expo.gdf["category_id"][0], 1) - self.assertEqual(expo.gdf["category_id"][n_expos - 1], 1) + self.assertIn("int", str(expo.category_id.dtype)) + self.assertEqual(expo.category_id.shape, (n_expos,)) + self.assertEqual(expo.category_id[0], 1) + self.assertEqual(expo.category_id[n_expos - 1], 1) - self.assertIn("int", str(expo.gdf["centr_"].dtype)) - self.assertEqual(expo.gdf["centr_"].shape, (n_expos,)) - self.assertEqual(expo.gdf["centr_"][0], 47) - self.assertEqual(expo.gdf["centr_"][n_expos - 1], 46) + self.assertIn("int", str(expo.hazard_centroids().dtype)) + self.assertEqual(expo.hazard_centroids().shape, (n_expos,)) + self.assertEqual(expo.hazard_centroids()[0], 47) + self.assertEqual(expo.hazard_centroids()[n_expos - 1], 46) self.assertTrue("region_id" not in expo.gdf) - self.assertEqual(expo.gdf["latitude"].shape, (n_expos,)) - self.assertEqual(expo.gdf["latitude"][0], 26.93389900000) - self.assertEqual(expo.gdf["latitude"][n_expos - 1], 26.34795700000) - self.assertEqual(expo.gdf["longitude"][0], -80.12879900000) - self.assertEqual(expo.gdf["longitude"][n_expos - 1], 
-80.15885500000) + self.assertEqual(expo.latitude.size, n_expos) + self.assertEqual(expo.latitude[0], 26.93389900000) + self.assertEqual(expo.latitude[n_expos - 1], 26.34795700000) + self.assertEqual(expo.longitude[0], -80.12879900000) + self.assertEqual(expo.longitude[n_expos - 1], -80.15885500000) self.assertEqual(expo.ref_year, 2016) self.assertEqual(expo.value_unit, "USD") diff --git a/climada/entity/measures/base.py b/climada/entity/measures/base.py index 93505feb38..c29e78fffb 100755 --- a/climada/entity/measures/base.py +++ b/climada/entity/measures/base.py @@ -324,11 +324,9 @@ def _change_all_exposures(self, exposures): ) if not np.array_equal( - np.unique(exposures.gdf["latitude"].values), - np.unique(new_exp.gdf["latitude"].values), + np.unique(exposures.latitude), np.unique(new_exp.latitude) ) or not np.array_equal( - np.unique(exposures.gdf["longitude"].values), - np.unique(new_exp.gdf["longitude"].values), + np.unique(exposures.longitude), np.unique(new_exp.longitude) ): LOGGER.warning("Exposures locations have changed.") @@ -433,7 +431,7 @@ def _cutoff_hazard_damage(self, exposures, impf_set, hazard): if self.exp_region_id: # compute impact only in selected region in_reg = np.logical_or.reduce( - [exposures.gdf["region_id"].values == reg for reg in self.exp_region_id] + [exposures.region_id == reg for reg in self.exp_region_id] ) exp_imp = Exposures(exposures.gdf[in_reg], crs=exposures.crs) else: diff --git a/climada/entity/measures/test/test_base.py b/climada/entity/measures/test/test_base.py index 4f14f4a5ad..d8688e4bf1 100644 --- a/climada/entity/measures/test/test_base.py +++ b/climada/entity/measures/test/test_base.py @@ -29,7 +29,7 @@ import climada.util.coordinates as u_coord from climada import CONFIG from climada.entity.entity_def import Entity -from climada.entity.exposures.base import INDICATOR_IMPF, Exposures +from climada.entity.exposures.base import Exposures from climada.entity.impact_funcs.base import ImpactFunc from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet from climada.entity.measures.base import IMPF_ID_FACT, Measure @@ -236,25 +236,14 @@ def test_change_exposures_impf_pass(self): self.assertEqual(new_exp.ref_year, exp.ref_year) self.assertEqual(new_exp.value_unit, exp.value_unit) self.assertEqual(new_exp.description, exp.description) + self.assertTrue(np.array_equal(new_exp.value, exp.value)) + self.assertTrue(np.array_equal(new_exp.latitude, exp.latitude)) + self.assertTrue(np.array_equal(new_exp.longitude, exp.longitude)) self.assertTrue( - np.array_equal(new_exp.gdf["value"].values, exp.gdf["value"].values) + np.array_equal(exp.hazard_impf("TC"), np.ones(new_exp.gdf.shape[0])) ) self.assertTrue( - np.array_equal(new_exp.gdf["latitude"].values, exp.gdf["latitude"].values) - ) - self.assertTrue( - np.array_equal(new_exp.gdf["longitude"].values, exp.gdf["longitude"].values) - ) - self.assertTrue( - np.array_equal( - exp.gdf[INDICATOR_IMPF + "TC"].values, np.ones(new_exp.gdf.shape[0]) - ) - ) - self.assertTrue( - np.array_equal( - new_exp.gdf[INDICATOR_IMPF + "TC"].values, - np.ones(new_exp.gdf.shape[0]) * 3, - ) + np.array_equal(new_exp.hazard_impf("TC"), np.ones(new_exp.gdf.shape[0]) * 3) ) def test_change_all_hazard_pass(self): @@ -290,19 +279,9 @@ def test_change_all_exposures_pass(self): self.assertEqual(new_exp.ref_year, ref_exp.ref_year) self.assertEqual(new_exp.value_unit, ref_exp.value_unit) self.assertEqual(new_exp.description, ref_exp.description) - self.assertTrue( - np.array_equal(new_exp.gdf["value"].values, 
ref_exp.gdf["value"].values) - ) - self.assertTrue( - np.array_equal( - new_exp.gdf["latitude"].values, ref_exp.gdf["latitude"].values - ) - ) - self.assertTrue( - np.array_equal( - new_exp.gdf["longitude"].values, ref_exp.gdf["longitude"].values - ) - ) + self.assertTrue(np.array_equal(new_exp.value, ref_exp.value)) + self.assertTrue(np.array_equal(new_exp.latitude, ref_exp.latitude)) + self.assertTrue(np.array_equal(new_exp.longitude, ref_exp.longitude)) def test_not_filter_exposures_pass(self): """Test _filter_exposures method with []""" @@ -369,17 +348,11 @@ def test_filter_exposures_pass(self): self.assertEqual(res_exp.value_unit, exp.value_unit) self.assertEqual(res_exp.description, exp.description) self.assertTrue(u_coord.equal_crs(res_exp.crs, exp.crs)) - self.assertFalse(hasattr(exp.gdf, "crs")) - self.assertFalse(hasattr(res_exp.gdf, "crs")) # regions (that is just input data, no need for testing, but it makes the changed and unchanged parts obious) - self.assertTrue(np.array_equal(res_exp.gdf["region_id"].values[0], 4)) - self.assertTrue( - np.array_equal(res_exp.gdf["region_id"].values[1:25], np.ones(24) * 3) - ) - self.assertTrue( - np.array_equal(res_exp.gdf["region_id"].values[25:], np.ones(25)) - ) + self.assertTrue(np.array_equal(res_exp.region_id[0], 4)) + self.assertTrue(np.array_equal(res_exp.region_id[1:25], np.ones(24) * 3)) + self.assertTrue(np.array_equal(res_exp.region_id[25:], np.ones(25))) # changed exposures self.assertTrue( @@ -402,17 +375,8 @@ def test_filter_exposures_pass(self): ) ) ) - self.assertTrue( - np.array_equal( - res_exp.gdf["latitude"].values[:25], new_exp.gdf["latitude"].values[:25] - ) - ) - self.assertTrue( - np.array_equal( - res_exp.gdf["longitude"].values[:25], - new_exp.gdf["longitude"].values[:25], - ) - ) + self.assertTrue(np.array_equal(res_exp.latitude[:25], new_exp.latitude[:25])) + self.assertTrue(np.array_equal(res_exp.longitude[:25], new_exp.longitude[:25])) # unchanged exposures self.assertTrue( @@ -432,16 +396,8 @@ def test_filter_exposures_pass(self): res_exp.gdf["impf_TC"].values[25:], exp.gdf["impf_TC"].values[25:] ) ) - self.assertTrue( - np.array_equal( - res_exp.gdf["latitude"].values[25:], exp.gdf["latitude"].values[25:] - ) - ) - self.assertTrue( - np.array_equal( - res_exp.gdf["longitude"].values[25:], exp.gdf["longitude"].values[25:] - ) - ) + self.assertTrue(np.array_equal(res_exp.latitude[25:], exp.latitude[25:])) + self.assertTrue(np.array_equal(res_exp.longitude[25:], exp.longitude[25:])) # unchanged impact functions self.assertEqual(list(res_ifs.get_func().keys()), [meas.haz_type]) @@ -657,12 +613,8 @@ def test_calc_impact_pass(self): self.assertAlmostEqual(imp.at_event[12], 1.470194187501225e07) self.assertAlmostEqual(imp.at_event[41], 4.7226357936631286e08) self.assertAlmostEqual(imp.at_event[11890], 1.742110428135755e07) - self.assertTrue( - np.array_equal(imp.coord_exp[:, 0], entity.exposures.gdf["latitude"]) - ) - self.assertTrue( - np.array_equal(imp.coord_exp[:, 1], entity.exposures.gdf["longitude"]) - ) + self.assertTrue(np.array_equal(imp.coord_exp[:, 0], entity.exposures.latitude)) + self.assertTrue(np.array_equal(imp.coord_exp[:, 1], entity.exposures.longitude)) self.assertAlmostEqual(imp.eai_exp[0], 1.15677655725858e08) self.assertAlmostEqual(imp.eai_exp[-1], 7.528669956120645e07) self.assertAlmostEqual(imp.tot_value, 6.570532945599105e11) diff --git a/climada/hazard/centroids/centr.py b/climada/hazard/centroids/centr.py index c1e8bb68bf..b0c6365c7e 100644 --- a/climada/hazard/centroids/centr.py +++ 
b/climada/hazard/centroids/centr.py @@ -133,6 +133,8 @@ def geometry(self): @property def on_land(self): """Get the on_land property""" + if "on_land" not in self.gdf: + return None if self.gdf["on_land"].isna().all(): return None return self.gdf["on_land"].values @@ -140,6 +142,8 @@ def on_land(self): @property def region_id(self): """Get the assigned region_id""" + if "region_id" not in self.gdf: + return None if self.gdf["region_id"].isna().all(): return None return self.gdf["region_id"].values @@ -284,33 +288,15 @@ def from_exposures(cls, exposures): ------ ValueError """ + # exclude exposures specific columns col_names = [ column for column in exposures.gdf.columns if not any(pattern in column for pattern in EXP_SPECIFIC_COLS) ] - # Legacy behaviour - # Exposures can be without geometry column - # TODO: remove once exposures is real geodataframe with geometry. - if "geometry" in exposures.gdf.columns: - gdf = exposures.gdf[col_names] - return cls.from_geodataframe(gdf) - - if "latitude" in exposures.gdf.columns and "longitude" in exposures.gdf.columns: - gdf = exposures.gdf[col_names] - return cls( - lat=exposures.gdf["latitude"], - lon=exposures.gdf["longitude"], - crs=exposures.crs, - **dict(gdf.items()), - ) - - raise ValueError( - "The given exposures object has no coordinates information." - "The exposures' GeoDataFrame must have either point geometries" - " or latitude and longitude values." - ) + gdf = exposures.gdf[col_names] + return cls.from_geodataframe(gdf) @classmethod def from_pnt_bounds(cls, points_bounds, res, crs=DEF_CRS): diff --git a/climada/hazard/centroids/test/test_centr.py b/climada/hazard/centroids/test/test_centr.py index a41060bae2..778d9383ef 100644 --- a/climada/hazard/centroids/test/test_centr.py +++ b/climada/hazard/centroids/test/test_centr.py @@ -392,7 +392,7 @@ def test_set_region_id_implementationerror(self): centroids.set_region_id(level="continent", overwrite=True) def test_set_geometry_points_pass(self): - """Test set_geometry_points""" + """Test geometry is set""" centr_ras = Centroids.from_raster_file(HAZ_DEMO_FL, window=Window(0, 0, 50, 60)) x_flat = np.arange(-69.3326495969998, -68.88264959699978, 0.009000000000000341) y_flat = np.arange(10.423720966978939, 9.883720966978919, -0.009000000000000341) @@ -708,11 +708,13 @@ def test_from_exposures_without_region_id(self): False, ) - def test_from_exposure_exceptions(self): + def test_from_empty_exposures(self): gdf = gpd.GeoDataFrame({}) exposures = Exposures(gdf) - with self.assertRaises(ValueError): - Centroids.from_exposures(exposures) + centroids = Centroids.from_exposures(exposures) + self.assertEqual( + centroids.gdf.shape, (0, 1) + ) # there is an empty geometry column def test_read_write_hdf5(self): tmpfile = Path("test_write_hdf5.out.hdf5") diff --git a/climada/hazard/tc_tracks.py b/climada/hazard/tc_tracks.py index 3f2fb85b83..963d282cd3 100644 --- a/climada/hazard/tc_tracks.py +++ b/climada/hazard/tc_tracks.py @@ -340,9 +340,9 @@ def tracks_in_exp(self, exposure, buffer=1.0): if buffer <= 0.0: raise ValueError(f"buffer={buffer} is invalid, must be above zero.") try: - exposure.gdf.geometry + exposure.geometry except AttributeError: - exposure.set_geometry_points() + raise Exception("this is not an Exposures object") exp_buffer = exposure.gdf.buffer(distance=buffer, resolution=0) exp_buffer = exp_buffer.unary_union diff --git a/climada/test/test_api_client.py b/climada/test/test_api_client.py index 6bd86ed4f0..26ce163fdb 100644 --- a/climada/test/test_api_client.py +++ 
b/climada/test/test_api_client.py @@ -181,7 +181,7 @@ def test_get_exposures(self): dump_dir=DATA_DIR, ) self.assertEqual(len(exposures.gdf), 5782) - self.assertEqual(np.unique(exposures.gdf["region_id"]), 40) + self.assertEqual(np.unique(exposures.region_id), 40) self.assertEqual( exposures.description, "LitPop Exposure for ['AUT'] at 150 as, year: 2018, financial mode: pop, exp: [0, 1], admin1_calc: False", @@ -266,7 +266,7 @@ def test_get_litpop(self): client = Client() litpop = client.get_litpop(country="LUX", version="v1", dump_dir=DATA_DIR) self.assertEqual(len(litpop.gdf), 188) - self.assertEqual(np.unique(litpop.gdf["region_id"]), 442) + self.assertEqual(np.unique(litpop.region_id), 442) self.assertEqual( litpop.description, "LitPop Exposure for ['LUX'] at 150 as, year: 2018, financial mode: pc, exp: [1, 1], admin1_calc: False", diff --git a/climada/test/test_litpop_integr.py b/climada/test/test_litpop_integr.py index 0390a4538a..2c2ddba88b 100644 --- a/climada/test/test_litpop_integr.py +++ b/climada/test/test_litpop_integr.py @@ -76,8 +76,8 @@ def test_switzerland300_pass(self): ) self.assertIn("LitPop: Init Exposure for country: CHE", cm.output[0]) - self.assertEqual(ent.gdf["region_id"].min(), 756) - self.assertEqual(ent.gdf["region_id"].max(), 756) + self.assertEqual(ent.region_id.min(), 756) + self.assertEqual(ent.region_id.max(), 756) # confirm that the total value is equal to GDP * (income_group+1): self.assertAlmostEqual( ent.gdf["value"].sum() / gdp("CHE", 2016)[1], @@ -115,9 +115,9 @@ def test_switzerland30normPop_pass(self): ) # print(cm) self.assertIn("LitPop: Init Exposure for country: CHE", cm.output[0]) - self.assertEqual(ent.gdf["region_id"].min(), 756) - self.assertEqual(ent.gdf["region_id"].max(), 756) - self.assertEqual(ent.gdf["value"].sum(), 1.0) + self.assertEqual(ent.region_id.min(), 756) + self.assertEqual(ent.region_id.max(), 756) + self.assertEqual(ent.value.sum(), 1.0) self.assertEqual(ent.ref_year, 2015) def test_suriname30_nfw_pass(self): @@ -128,8 +128,8 @@ def test_suriname30_nfw_pass(self): country_name, reference_year=2016, fin_mode=fin_mode ) - self.assertEqual(ent.gdf["region_id"].min(), 740) - self.assertEqual(ent.gdf["region_id"].max(), 740) + self.assertEqual(ent.region_id.min(), 740) + self.assertEqual(ent.region_id.max(), 740) self.assertEqual(ent.ref_year, 2016) def test_switzerland300_admin1_pc2016_pass(self): @@ -164,23 +164,23 @@ def test_from_shape_zurich_pass(self): ent = lp.LitPop.from_shape( shape, total_value, res_arcsec=30, reference_year=2016 ) - self.assertEqual(ent.gdf["value"].sum(), 1000.0) - self.assertEqual(ent.gdf["value"].min(), 0.0) - self.assertEqual(ent.gdf["region_id"].min(), 756) - self.assertEqual(ent.gdf["region_id"].max(), 756) - self.assertAlmostEqual(ent.gdf["latitude"].min(), 47.20416666666661) + self.assertEqual(ent.value.sum(), 1000.0) + self.assertEqual(ent.value.min(), 0.0) + self.assertEqual(ent.region_id.min(), 756) + self.assertEqual(ent.region_id.max(), 756) + self.assertAlmostEqual(ent.latitude.min(), 47.20416666666661) # index and coord. 
of largest value: self.assertEqual( ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()].index[0], 482 ) self.assertAlmostEqual( - ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()]["latitude"].values[ + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()].geometry.y.values[ 0 ], 47.34583333333325, ) self.assertAlmostEqual( - ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()]["longitude"].values[ + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()].geometry.x.values[ 0 ], 8.529166666666658, @@ -193,22 +193,22 @@ def test_from_shape_and_countries_zurich_pass(self): ent = lp.LitPop.from_shape_and_countries( shape, "Switzerland", res_arcsec=30, reference_year=2016 ) - self.assertEqual(ent.gdf["value"].min(), 0.0) - self.assertEqual(ent.gdf["region_id"].min(), 756) - self.assertEqual(ent.gdf["region_id"].max(), 756) - self.assertAlmostEqual(ent.gdf["latitude"].min(), 47.20416666666661) + self.assertEqual(ent.value.min(), 0.0) + self.assertEqual(ent.region_id.min(), 756) + self.assertEqual(ent.region_id.max(), 756) + self.assertAlmostEqual(ent.latitude.min(), 47.20416666666661) # coord of largest value: self.assertEqual( - ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()].index[0], 434 + ent.gdf.loc[ent.gdf.value == ent.gdf.value.max()].index[0], 434 ) self.assertAlmostEqual( - ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()]["latitude"].values[ + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()].geometry.y.values[ 0 ], 47.34583333333325, ) self.assertAlmostEqual( - ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()]["longitude"].values[ + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()].geometry.x.values[ 0 ], 8.529166666666658, @@ -220,10 +220,10 @@ def test_Liechtenstein_15_lit_pass(self): ref_year = 2016 ent = lp.LitPop.from_nightlight_intensity(country_name, reference_year=ref_year) - self.assertEqual(ent.gdf["value"].sum(), 36469.0) - self.assertEqual(ent.gdf["region_id"][1], 438) + self.assertEqual(ent.value.sum(), 36469.0) + self.assertEqual(ent.region_id[1], 438) self.assertEqual(ent.value_unit, "") - self.assertAlmostEqual(ent.gdf["latitude"].max(), 47.260416666666664) + self.assertAlmostEqual(ent.latitude.max(), 47.260416666666664) self.assertAlmostEqual(ent.meta["transform"][4], -15 / 3600) def test_Liechtenstein_30_pop_pass(self): @@ -232,10 +232,10 @@ def test_Liechtenstein_30_pop_pass(self): ref_year = 2015 ent = lp.LitPop.from_population(country_name, reference_year=ref_year) - self.assertEqual(ent.gdf["value"].sum(), 30068.970703125) - self.assertEqual(ent.gdf["region_id"][1], 438) + self.assertEqual(ent.value.sum(), 30068.970703125) + self.assertEqual(ent.region_id[1], 438) self.assertEqual(ent.value_unit, "people") - self.assertAlmostEqual(ent.gdf["latitude"].max(), 47.2541666666666) + self.assertAlmostEqual(ent.latitude.max(), 47.2541666666666) self.assertAlmostEqual(ent.meta["transform"][0], 30 / 3600) def test_from_nightlight_intensity(self): @@ -331,8 +331,8 @@ def test_calc_admin1(self): ) self.assertEqual(ent.gdf.shape[0], 699) - self.assertEqual(ent.gdf["region_id"][88], 756) - self.assertAlmostEqual(ent.gdf["latitude"].max(), 47.708333333333336) + self.assertEqual(ent.region_id[88], 756) + self.assertAlmostEqual(ent.latitude.max(), 47.708333333333336) # shape must be same as with admin1_calc = False, otherwise there # is a problem with handling of the admin1 shapes: ent_adm0 = lp.LitPop.from_countries( diff --git a/climada/test/test_plot.py b/climada/test/test_plot.py index 082f38e1b0..04131741f2 100644 --- 
a/climada/test/test_plot.py +++ b/climada/test/test_plot.py @@ -161,12 +161,8 @@ def test_impact_pass(self): def test_ctx_osm_pass(self): """Test basemap function using osm images""" - myexp = Exposures() - myexp.gdf["latitude"] = np.array([30, 40, 50]) - myexp.gdf["longitude"] = np.array([0, 0, 0]) - myexp.gdf["value"] = np.array([1, 1, 1]) + myexp = Exposures(lat=[30, 40, 50], lon=[0, 0, 0], value=[1, 1, 1]) myexp.check() - myexp.plot_basemap(url=ctx.providers.OpenStreetMap.Mapnik) def test_disc_rates(self): diff --git a/climada/util/coordinates.py b/climada/util/coordinates.py index e160965b1c..cec74b512c 100644 --- a/climada/util/coordinates.py +++ b/climada/util/coordinates.py @@ -1100,7 +1100,7 @@ def match_centroids( Parameters ---------- coord_gdf : gpd.GeoDataFrame - GeoDataframe with defined latitude/longitude column and crs + GeoDataframe with defined geometry column and crs centroids : Centroids (Hazard) centroids to match (as raster or vector centroids). distance : str, optional @@ -1146,7 +1146,7 @@ def match_centroids( pass assigned = match_coordinates( - np.stack([coord_gdf["latitude"].values, coord_gdf["longitude"].values], axis=1), + np.stack([coord_gdf.geometry.y.values, coord_gdf.geometry.x.values], axis=1), centroids.coord, distance=distance, threshold=threshold, @@ -1911,6 +1911,8 @@ def equal_crs(crs_one, crs_two): """ if crs_one is None: return crs_two is None + if crs_two is None: + return False return rasterio.crs.CRS.from_user_input( crs_one ) == rasterio.crs.CRS.from_user_input(crs_two) @@ -2615,8 +2617,9 @@ def points_to_raster( Parameters ---------- - points_df : GeoDataFrame - contains columns latitude, longitude and those listed in the parameter `val_names`. + points_df : GeoDataFrame | DataFrame + contains columns listed in the parameter `val_names` and 'geometry' if it is a GeoDataFrame + or 'latitude' and 'longitude' if it is a DataFrame. val_names : list of str, optional The names of columns in `points_df` containing values. The raster will contain one band per column. 
Default: ['value'] @@ -2642,15 +2645,25 @@ def points_to_raster( """ if not val_names: val_names = ["value"] + + if "geometry" in points_df: + latval = points_df.geometry.y + lonval = points_df.geometry.x + else: + latval = points_df["latitude"].values + lonval = points_df["longitude"].values + if not res: - res = np.abs( - get_resolution(points_df["latitude"].values, points_df["longitude"].values) - ).min() + res = np.abs(get_resolution(latval, lonval)).min() if not raster_res: raster_res = res - def apply_box(df_exp): + if "geometry" in points_df: + fun = lambda r: r.geometry.buffer(res / 2).envelope + else: fun = lambda r: Point(r["longitude"], r["latitude"]).buffer(res / 2).envelope + + def apply_box(df_exp): return df_exp.apply(fun, axis=1) LOGGER.info("Raster from resolution %s to %s.", res, raster_res) @@ -2680,9 +2693,7 @@ def apply_box(df_exp): # renormalize longitude if necessary if equal_crs(df_poly.crs, DEF_CRS): - xmin, ymin, xmax, ymax = latlon_bounds( - points_df["latitude"].values, points_df["longitude"].values - ) + xmin, ymin, xmax, ymax = latlon_bounds(latval, lonval) x_mid = 0.5 * (xmin + xmax) # we don't really change the CRS when rewrapping, so we reset the CRS attribute afterwards df_poly = df_poly.to_crs({"proj": "longlat", "lon_wrap": x_mid}).set_crs( @@ -2690,10 +2701,10 @@ def apply_box(df_exp): ) else: xmin, ymin, xmax, ymax = ( - points_df["longitude"].min(), - points_df["latitude"].min(), - points_df["longitude"].max(), - points_df["latitude"].max(), + lonval.min(), + latval.min(), + lonval.max(), + latval.max(), ) # construct raster diff --git a/climada/util/lines_polys_handler.py b/climada/util/lines_polys_handler.py index 244658b184..ee2058a68d 100755 --- a/climada/util/lines_polys_handler.py +++ b/climada/util/lines_polys_handler.py @@ -420,10 +420,9 @@ def exp_geom_to_pnt(exp, res, to_meters, disagg_met, disagg_val): if disagg_met is DisaggMethod.DIV: gdf_pnt = _disagg_values_div(gdf_pnt) - # set lat lon and centroids + # set dataframe exp_pnt = exp.copy(deep=False) exp_pnt.set_gdf(gdf_pnt) - exp_pnt.set_lat_lon() return exp_pnt diff --git a/climada/util/test/test_lines_polys_handler.py b/climada/util/test/test_lines_polys_handler.py index 8800d6d06b..c320e894ab 100644 --- a/climada/util/test/test_lines_polys_handler.py +++ b/climada/util/test/test_lines_polys_handler.py @@ -68,9 +68,7 @@ def check_unchanged_geom_gdf(self, gdf_geom, gdf_pnt): sub_gdf_pnt = gdf_pnt.xs(n, level=1) rows_sel = sub_gdf_pnt.index.to_numpy() sub_gdf = gdf_geom.loc[rows_sel] - self.assertTrue( - np.alltrue(sub_gdf.geometry.geom_equals(sub_gdf_pnt.geometry_orig)) - ) + self.assertTrue(np.all(sub_gdf.geometry.geom_equals(sub_gdf_pnt.geometry_orig))) for col in gdf_pnt.columns: if col not in COL_CHANGING: np.testing.assert_allclose(gdf_pnt[col].unique(), gdf_geom[col].unique()) @@ -139,7 +137,7 @@ def test_point_exposure_from_polygons(self): 3.83689000e10, ] ) - np.testing.assert_allclose(exp_pnt.gdf["value"], val_avg) + np.testing.assert_allclose(exp_pnt.value, val_avg) lat = np.array( [ 53.15019278, @@ -160,7 +158,7 @@ def test_point_exposure_from_polygons(self): 52.11286591, ] ) - np.testing.assert_allclose(exp_pnt.gdf["latitude"], lat) + np.testing.assert_allclose(exp_pnt.latitude, lat) # to_meters=TRUE, FIX, dissag_val res = 20000 @@ -173,7 +171,7 @@ def test_point_exposure_from_polygons(self): ) self.check_unchanged_exp(EXP_POLY, exp_pnt) val = res**2 - self.assertEqual(np.unique(exp_pnt.gdf["value"])[0], val) + self.assertEqual(np.unique(exp_pnt.value)[0], val) lat = 
np.array( [ 53.13923671, @@ -252,7 +250,7 @@ def test_point_exposure_from_polygons(self): 52.23308448, ] ) - np.testing.assert_allclose(exp_pnt.gdf["latitude"], lat) + np.testing.assert_allclose(exp_pnt.latitude, lat) # projected crs, to_meters=TRUE, FIX, dissag_val res = 20000 @@ -336,8 +334,10 @@ def test_point_exposure_from_polygons_on_grid(self): disagg_val=None, ) self.check_unchanged_exp(exp_poly, exp_pnt_grid) - for col in ["value", "latitude", "longitude"]: - np.testing.assert_allclose(exp_pnt.gdf[col], exp_pnt_grid.gdf[col]) + + np.testing.assert_allclose(exp_pnt.value, exp_pnt_grid.value) + np.testing.assert_allclose(exp_pnt.latitude, exp_pnt_grid.latitude) + np.testing.assert_allclose(exp_pnt.longitude, exp_pnt_grid.longitude) x_grid = np.append(x_grid, x_grid + 10) y_grid = np.append(y_grid, y_grid + 10) @@ -356,8 +356,10 @@ def test_point_exposure_from_polygons_on_grid(self): disagg_val=None, ) self.check_unchanged_exp(exp_poly, exp_pnt_grid) - for col in ["value", "latitude", "longitude"]: - np.testing.assert_allclose(exp_pnt.gdf[col], exp_pnt_grid.gdf[col]) + + np.testing.assert_allclose(exp_pnt.value, exp_pnt_grid.value) + np.testing.assert_allclose(exp_pnt.latitude, exp_pnt_grid.latitude) + np.testing.assert_allclose(exp_pnt.longitude, exp_pnt_grid.longitude) def test_point_exposure_from_lines(self): """Test disaggregation of lines to points""" @@ -428,7 +430,7 @@ def test_point_exposure_from_lines(self): 50.9105503, ] ) - np.testing.assert_allclose(exp_pnt.gdf["latitude"], lat) + np.testing.assert_allclose(exp_pnt.latitude, lat) class TestGeomImpactCalcs(unittest.TestCase): @@ -568,7 +570,7 @@ def test_calc_geom_impact_points(self): aai_agg1 = 0.0470814 exp = EXP_POINT.copy() - exp.set_lat_lon() + # exp.set_lat_lon() imp11 = ImpactCalc(exp, IMPF_SET, HAZ).impact() check_impact(self, imp1, HAZ, EXP_POINT, aai_agg1, imp11.eai_exp) @@ -1180,7 +1182,7 @@ def test_swap_geom_cols(self): gdf_orig = GDF_POLY.copy() gdf_orig["new_geom"] = gdf_orig.geometry swap_gdf = u_lp._swap_geom_cols(gdf_orig, "old_geom", "new_geom") - self.assertTrue(np.alltrue(swap_gdf.geometry.geom_equals(gdf_orig.new_geom))) + self.assertTrue(np.all(swap_gdf.geometry.geom_equals(gdf_orig.new_geom))) if __name__ == "__main__": diff --git a/doc/tutorial/1_main_climada.ipynb b/doc/tutorial/1_main_climada.ipynb index 36ce87bb2e..7a9b45ab83 100644 --- a/doc/tutorial/1_main_climada.ipynb +++ b/doc/tutorial/1_main_climada.ipynb @@ -294,7 +294,6 @@ "\n", "min_lat, max_lat, min_lon, max_lon = 17.5, 19.0, -68.0, -65.0\n", "cent = Centroids.from_pnt_bounds((min_lon, min_lat, max_lon, max_lat), res=0.05)\n", - "cent.check()\n", "cent.plot();" ] }, @@ -302,8 +301,6 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Almost every class in CLIMADA has a `check()` method, as used above. This verifies that the necessary data for an object is correctly provided and logs the optional variables that are not present. It is always worth running it after filling an instance of an object.\n", - "\n", "### Hazard footprint\n", "\n", "Now we're ready to create our hazard object. This will be a `TropCyclone` class, which inherits from the `Hazard` class, and has the `from_tracks` constructor method to create a hazard from a `TCTracks` object at given centroids." 
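The hunks just above and below touch the same tutorial workflow: centroids on a bounding box, storm tracks, then the wind-field hazard at those centroids. Pieced together, the pattern reads roughly as follows (a minimal sketch, assuming IBTrACS data can be fetched; the storm id is the one used elsewhere in these tutorials):

```python
from climada.hazard import Centroids, TCTracks, TropCyclone

# regular grid of centroids over the region of interest
min_lat, max_lat, min_lon, max_lon = 17.5, 19.0, -68.0, -65.0
cent = Centroids.from_pnt_bounds((min_lon, min_lat, max_lon, max_lat), res=0.05)

# a single historical track, then the wind field computed at the centroids
tracks = TCTracks.from_ibtracs_netcdf(storm_id="2017242N16333")  # illustrative storm id
haz = TropCyclone.from_tracks(tracks, centroids=cent)
haz.check()  # verifies that the necessary hazard data is correctly provided
```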
@@ -339,7 +336,7 @@
    "from climada.hazard import TropCyclone\n",
    "\n",
    "haz = TropCyclone.from_tracks(tracks, centroids=cent)\n",
-    "haz.check()"
+    "haz.check() # verifies that the necessary data for the Hazard object is correctly provided"
   ]
  },
  {
@@ -563,7 +560,6 @@
    "exp_litpop = LitPop.from_countries(\n",
    "    \"Puerto Rico\", res_arcsec=120\n",
    ") # We'll go lower resolution than default to keep it simple\n",
-    "exp_litpop.set_geometry_points() # Set geodataframe geometries from lat lon data\n",
    "\n",
    "exp_litpop.plot_hexbin(pop_name=True, linewidth=4, buffer=0.1);"
   ]
diff --git a/doc/tutorial/climada_engine_Forecast.ipynb b/doc/tutorial/climada_engine_Forecast.ipynb
index 29c9a5930f..2ab5bb8418 100644
--- a/doc/tutorial/climada_engine_Forecast.ipynb
+++ b/doc/tutorial/climada_engine_Forecast.ipynb
@@ -257,9 +257,7 @@
    "### generate exposure\n",
    "# find out which hazard coord to consider\n",
    "CHE_borders = u_plot._get_borders(\n",
-    "    np.stack(\n",
-    "        [exposure.gdf[\"latitude\"].values, exposure.gdf[\"longitude\"].values], axis=1\n",
-    "    )\n",
+    "    np.stack([exposure.latitude, exposure.longitude], axis=1)\n",
    ")\n",
    "centroid_selection = np.logical_and(\n",
    "    np.logical_and(\n",
diff --git a/doc/tutorial/climada_engine_Impact.ipynb b/doc/tutorial/climada_engine_Impact.ipynb
index b6ea21cd89..a342a43b39 100644
--- a/doc/tutorial/climada_engine_Impact.ipynb
+++ b/doc/tutorial/climada_engine_Impact.ipynb
@@ -68,7 +68,7 @@
    "| event_id |list(int)| id (>0) of each hazard event (Hazard.event_id)|\n",
    "| event_name |(list(str))| name of each event (Hazard.event_name)|\n",
    "| date |np.array| date of events (Hazard.date)|\n",
-    "| coord_exp |np.array| exposures coordinates [lat, lon] (in degrees) (Exposure.gdf['latitudes'], Exposure.gdf['longitude'])|\n",
+    "| coord_exp |np.array| exposures coordinates [lat, lon] (in degrees) (Exposure.latitude, Exposure.longitude)|\n",
    "| frequency |np.array| frequency of events (Hazard.frequency)|\n",
    "| frequency_unit |str| unit of event frequency, by default '1/year', i.e., annual (Hazard.frequency_unit)|\n",
    "| unit |str| value unit used (Exposure.value_unit)|\n",
@@ -1486,9 +1486,7 @@
    "\n",
    "# Set Hazard in Exposures points\n",
    "# set centroids from exposures coordinates\n",
-    "centr_pnt = Centroids.from_lat_lon(\n",
-    "    exp_pnt.gdf[\"latitude\"].values, exp_pnt.gdf[\"longitude\"].values, exp_pnt.crs\n",
-    ")\n",
+    "centr_pnt = Centroids.from_lat_lon(exp_pnt.latitude, exp_pnt.longitude, exp_pnt.crs)\n",
    "# compute Hazard in that centroids\n",
    "tr_pnt = TCTracks.from_ibtracs_netcdf(storm_id=\"2007314N10093\")\n",
    "tc_pnt = TropCyclone.from_tracks(tr_pnt, centroids=centr_pnt)\n",
@@ -2007,9 +2005,7 @@
    "\n",
    "# compute sequence of hazards using TropCyclone video_intensity method\n",
    "exp_sea = add_sea(exp_video, (100, 5))\n",
-    "centr_video = Centroids.from_lat_lon(\n",
-    "    exp_sea.gdf[\"latitude\"].values, exp_sea.gdf[\"longitude\"].values\n",
-    ")\n",
+    "centr_video = Centroids.from_lat_lon(exp_sea.latitude, exp_sea.longitude)\n",
    "centr_video.check()\n",
    "\n",
    "track_name = \"2017242N16333\"\n",
diff --git a/doc/tutorial/climada_entity_Exposures.ipynb b/doc/tutorial/climada_entity_Exposures.ipynb
index d46903e8f2..a57079ef20 100644
--- a/doc/tutorial/climada_entity_Exposures.ipynb
+++ b/doc/tutorial/climada_entity_Exposures.ipynb
@@ -28,8 +28,11 @@
    "\n",
    "### What does an exposure look like in CLIMADA?\n",
    "\n",
-    "An exposure is represented in the class `Exposures`, which contains a 
[geopandas](https://geopandas.readthedocs.io/en/latest/gallery/cartopy_convert.html) [GeoDataFrame](https://geopandas.readthedocs.io/en/latest/docs/user_guide/data_structures.html#geodataframe) that is accessible through the `Exposures` `gdf` attribute.\n",
-    "Certain columns of `gdf` _have to_ be specified, while others are optional (this means that the package `climada.engine` also works without these variables set.) The full list looks like this:"
+    "An exposure is represented in the class `Exposures`, which contains a [geopandas](https://geopandas.readthedocs.io/en/latest/gallery/cartopy_convert.html) [GeoDataFrame](https://geopandas.readthedocs.io/en/latest/docs/user_guide/data_structures.html#geodataframe) that is accessible through the `Exposures.data` attribute.\n",
+    "A \"geometry\" column is initialized in the `GeoDataFrame` of the `Exposures` object. Other columns are optional at first, but some must be present or make a difference when it comes to calculations.\n",
+    "Apart from these special columns, the data frame may contain additional columns; they are simply ignored in the context of CLIMADA.\n",
+    "\n",
+    "The full list of meaningful columns is:"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
-    "
\n", + "| Column | Data Type | Description | Meaningful in | Optional |\n", + "| :-------------------- | :------------ | :------------------------------------------------------------------------------------- | - | :-: |\n", + "| `geometry` | Point | the geometry column of the `GeoDataFrame`, i.e., latitude (y) and longitude (x) | centroids assignment | - |\n", + "| `value` | float | a value for each exposure | impact calculation | ✔* |\n", + "| `impf_*` | int | impact functions ids for hazard types.
an important attribute, since it relates the exposures to the hazard by specifying the impact functions. 
Ideally it should be set to the specific hazard (e.g. `impf_TC`) so that different hazards can be set
in the same Exposures (e.g. `impf_TC` and `impf_FL`). | impact calculation | ✔* |\n", + "| `centr_*` | int | centroids index for hazard type.
There might be different hazards defined: centr_TC, centr_FL, ...
Computed in method `assign_centroids()` | impact calculation | ✔* |\n", + "| `deductible` | float | deductible value for each exposure.
Used for insurance | impact calculation | ✔ |\n", + "| `cover` | float | cover value for each exposure.
Used for insurance | impact calculation | ✔ |\n", + "| `region_id` | int | region id (e.g. country ISO code) for each exposure | aggregation | ✔ |\n", + "| `category_id` | int | category id (e.g. building code) for each exposure | aggregation | ✔ |\n", "\n", - "| Mandatory columns | Data Type | Description |\n", - "| :-------------------- | :------------ | :------------------------------------------------------------------------------------- |\n", - "| `latitude` | float | latitude |\n", - "| `longitude` | float | longitude |\n", - "| `value` | float | a value for each exposure                                                                                                                 |\n", + "*) an Exposures object is valid without such a column, but it's required for impact calculation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Apart from `data` the `Exposures` object has the following attributes and properties:\n", "\n", "
\n", - "
\n", "\n", - "| Optional columns | Data Type | Description |\n", + "| Attributes | Data Type | Description |\n", "| :-------------------- | :------------ | :------------------------------------------------------------------------------------- |\n", - "| `impf_*` | int | impact functions ids for hazard types.
important attribute, since it relates the exposures to the hazard by specifying the impf_act functions.
Ideally it should be set to the specific hazard (e.g. `impf_TC`) so that different hazards can be set
in the same Exposures (e.g. `impf_TC` and `impf_FL`).
If not provided, set to default `impf_` with ids 1 in check(). |\n", - "| `geometry` | Point | geometry of type Point
Main feature of geopandas DataFrame extension
Computed in method `set_geometry_points()` |\n", - "| `deductible` | float | deductible value for each exposure.
Used for insurance |\n", - "| `cover` | float | cover value for each exposure.
Used for insurance |\n", - "| `category_id` | int | category id (e.g. building code) for each exposure |\n", - "| `region_id` | int | region id (e.g. country ISO code) for each exposure |\n", - "| `impf_*` | int | centroids index for hazard type.
There might be different hazards defined: centr_TC, centr_FL, ...
Computed in method `assign_centroids()` |\n", + "| `description` | str | describing origin and content of the exposures data |\n", + "| `ref_year` | int | reference year |\n", + "| `value_unit` | str | unit of the exposures' values |\n", "\n", "
\n", "
\n", "\n", - "| Metadata variables | Data Type | Description |\n", + "| Properties | Data Type | Description |\n", "| :-------------------- | :------------ | :------------------------------------------------------------------------------------- |\n", - "| `crs` | str or int | coordinate reference system, see GeoDataFrame.crs |\n", - "| `description` | str | describing origin and content of the exposures data |\n", - "| `ref_year` | int | reference year |\n", - "| `value_unit` | str | unit of the exposures' values |\n", - "| `meta` | dict | dictionary containing corresponding raster properties (if any):
width, height, crs and transform must be present at least (transform needs to contain upper left corner!).
Exposures might not contain all the points of the corresponding raster. |\n" + "| `geometry` | numpy.array[Point] | array of geometry values |\n", + "| `crs` | pyproj.CRS | coordinate reference system, see
GeoDataFrame.crs |\n",
+    "| `latitude` | numpy.array[float] | array of latitude values |\n",
+    "| `longitude` | numpy.array[float] | array of longitude values |\n",
+    "| `region_id` | numpy.array[int] | array of region_id values |\n",
+    "| `category_id` | numpy.array[int] | array of category_id values |\n",
+    "| `cover` | numpy.array[float] | array of cover values |\n",
+    "| `deductible` | numpy.array[float] | array of deductible values |"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "### Exposures from a pandas DataFrame\n",
-    "\n",
-    "In case you are unfamiliar with the data structure, check out the [pandas DataFrame documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)."
+    "### Exposures from plain data"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
-     "name": "stderr",
+     "name": "stdout",
     "output_type": "stream",
     "text": [
-      "/Users/lseverino/miniforge3/envs/climada_env/lib/python3.9/site-packages/dask/dataframe/_pyarrow_compat.py:17: FutureWarning: Minimal version of pyarrow will soon be increased to 14.0.1. You are using 12.0.1. Please consider upgrading.\n",
-      "  warnings.warn(\n"
+      "description: random values in a square\n",
+      "ref_year: 2018\n",
+      "value_unit: CHF\n",
+      "crs: EPSG:7316\n",
+      "data: (9 entries)\n",
+      "   region_id  impf_             geometry     value\n",
+      "0          1      0  POINT (4.000 1.000)  0.035321\n",
+      "1          1      1  POINT (4.000 2.000)  0.570256\n",
+      "2          1      2  POINT (4.000 3.000)  0.927632\n",
+      "3          1      3  POINT (5.000 1.000)  0.805402\n",
+      "4          1      4  POINT (5.000 2.000)  0.236179\n",
+      "5          1      5  POINT (5.000 3.000)  0.848296\n",
+      "6          1      6  POINT (6.000 1.000)  0.520281\n",
+      "7          1      7  POINT (6.000 2.000)  0.036442\n",
+      "8          1      8  POINT (6.000 3.000)  0.780934\n"
     ]
    }
   ],
+   "source": [
+    "import numpy as np\n",
+    "from climada.entity import Exposures\n",
+    "\n",
+    "latitude = [1, 2, 3] * 3\n",
+    "longitude = [4] * 3 + [5] * 3 + [6] * 3\n",
+    "exp_arr = Exposures(\n",
+    "    lat=latitude, # list or array\n",
+    "    lon=longitude, # instead of lat and lon one can provide an array of Points through the geometry argument\n",
+    "    value=np.random.random_sample(len(latitude)), # a list or an array of floats\n",
+    "    value_unit=\"CHF\",\n",
+    "    crs=\"EPSG:7316\", # different formats are possible\n",
+    "    description=\"random values in a square\",\n",
+    "    data={\n",
+    "        \"region_id\": 1,\n",
+    "        \"impf_\": range(len(latitude)),\n",
+    "    }, # data can also be an array or a data frame\n",
+    ")\n",
+    "print(exp_arr)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
+    "### Exposures from a pandas DataFrame\n",
+    "\n",
+    "In case you are unfamiliar with the data structure, check out the [pandas DataFrame documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)."
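As a quick aside (not part of the original notebook cells), the accessor properties listed in the table above can be exercised on the `exp_arr` object built in the "Exposures from plain data" cell; a small sketch, assuming the optional accessors return `None` when their column is absent:

```python
# exp_arr is the Exposures object constructed above from plain lists and arrays
print(exp_arr.value.sum())    # the "value" column as a numpy array
print(exp_arr.latitude[:3])   # y coordinates of the geometry column
print(exp_arr.longitude[:3])  # x coordinates of the geometry column
print(exp_arr.region_id[:3])  # optional column, here set to 1 everywhere
print(exp_arr.crs)            # pyproj CRS, here EPSG:7316
```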
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], "source": [ "import numpy as np\n", "from pandas import DataFrame\n", @@ -114,7 +173,7 @@ "exp_df = DataFrame()\n", "n_exp = 100 * 100\n", "# provide value\n", - "exp_df[\"value\"] = np.arange(n_exp)\n", + "exp_df[\"value\"] = np.random.random_sample(n_exp)\n", "# provide latitude and longitude\n", "lat, lon = np.mgrid[\n", " 15 : 35 : complex(0, np.sqrt(n_exp)), 20 : 40 : complex(0, np.sqrt(n_exp))\n", @@ -125,47 +184,153 @@ }, { "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "# For each exposure entry, specify which impact function should be taken for which hazard type.\n", - "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", - "# the same impact function: the one that has ID '1':\n", - "# Of course, this will only be relevant at later steps during impact calculations.\n", - "exp_df[\"impf_TC\"] = np.ones(n_exp, int)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, + "execution_count": 23, "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "exp_df is a DataFrame: \n", - "exp_df looks like:\n", - " value latitude longitude impf_TC\n", - "0 0 15.0 20.000000 1\n", - "1 1 15.0 20.202020 1\n", - "2 2 15.0 20.404040 1\n", - "3 3 15.0 20.606061 1\n", - "4 4 15.0 20.808081 1\n" - ] + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
valuelatitudelongitudeimpf_TC
00.53376415.020.0000001
10.99599315.020.2020201
20.60352315.020.4040401
30.75425315.020.6060611
40.30506615.020.8080811
...............
99950.48241635.039.1919191
99960.06904435.039.3939391
99970.11656035.039.5959601
99980.23985635.039.7979801
99990.09956835.040.0000001
\n", + "

10000 rows × 4 columns

\n", + "
" + ], + "text/plain": [ + " value latitude longitude impf_TC\n", + "0 0.533764 15.0 20.000000 1\n", + "1 0.995993 15.0 20.202020 1\n", + "2 0.603523 15.0 20.404040 1\n", + "3 0.754253 15.0 20.606061 1\n", + "4 0.305066 15.0 20.808081 1\n", + "... ... ... ... ...\n", + "9995 0.482416 35.0 39.191919 1\n", + "9996 0.069044 35.0 39.393939 1\n", + "9997 0.116560 35.0 39.595960 1\n", + "9998 0.239856 35.0 39.797980 1\n", + "9999 0.099568 35.0 40.000000 1\n", + "\n", + "[10000 rows x 4 columns]" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ - "# Let's have a look at the pandas DataFrame\n", - "print(\"exp_df is a DataFrame:\", str(type(exp_df)))\n", - "print(\"exp_df looks like:\")\n", - "print(exp_df.head())" + "# For each exposure entry, specify which impact function should be taken for which hazard type.\n", + "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", + "# the same impact function: the one that has ID '1':\n", + "# Of course, this will only be relevant at later steps during impact calculations.\n", + "exp_df[\"impf_TC\"] = np.ones(n_exp, int)\n", + "exp_df" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -174,22 +339,7 @@ "text": [ "exp has the type: \n", "and contains a GeoDataFrame exp.gdf: \n", - "2024-04-12 14:39:01,086 - climada.util.coordinates - INFO - Setting geometry points.\n", - "\n", - "check method logs:\n", - "2024-04-12 14:39:01,093 - climada.entity.exposures.base - INFO - category_id not set.\n", - "2024-04-12 14:39:01,093 - climada.entity.exposures.base - INFO - cover not set.\n", - "2024-04-12 14:39:01,093 - climada.entity.exposures.base - INFO - deductible not set.\n", - "2024-04-12 14:39:01,094 - climada.entity.exposures.base - INFO - region_id not set.\n", - "2024-04-12 14:39:01,094 - climada.entity.exposures.base - INFO - centr_ not set.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/lseverino/Documents/PhD/workspace/climada_python/climada/util/coordinates.py:2755: FutureWarning: You are adding a column named 'geometry' to a GeoDataFrame constructed without an active geometry column. Currently, this automatically sets the active geometry column to 'geometry' but in the future that will no longer happen. Instead, either provide geometry to the GeoDataFrame constructor (GeoDataFrame(... geometry=GeoSeries()) or use `set_geometry('geometry')` to explicitly set the active geometry column.\n", - " df_val['geometry'] = gpd.GeoSeries(\n" + "\n" ] } ], @@ -197,55 +347,38 @@ "# Generate Exposures from the pandas DataFrame. This step converts the DataFrame into\n", "# a CLIMADA Exposures instance!\n", "exp = Exposures(exp_df)\n", - "print(\"exp has the type:\", str(type(exp)))\n", - "print(\"and contains a GeoDataFrame exp.gdf:\", str(type(exp.gdf)))\n", - "\n", - "# set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude\n", - "exp.set_geometry_points()\n", - "print(\"\\n\" + \"check method logs:\")\n", - "\n", - "# always apply the check() method in the end. 
It puts metadata that has not been assigned,\n", - "# and points out missing mandatory data\n", - "exp.check()" + "print(f\"exp has the type: {type(exp)}\")\n", + "print(f\"and contains a GeoDataFrame exp.gdf: {type(exp.gdf)}\\n\")" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\n", - "exp looks like:\n", "description: None\n", "ref_year: 2018\n", "value_unit: USD\n", - "meta: {'crs': 'EPSG:4326'}\n", "crs: EPSG:4326\n", - "data:\n", - " value latitude longitude impf_TC geometry\n", - "0 0 15.0 20.000000 1 POINT (20.00000 15.00000)\n", - "1 1 15.0 20.202020 1 POINT (20.20202 15.00000)\n", - "2 2 15.0 20.404040 1 POINT (20.40404 15.00000)\n", - "3 3 15.0 20.606061 1 POINT (20.60606 15.00000)\n", - "4 4 15.0 20.808081 1 POINT (20.80808 15.00000)\n", - "... ... ... ... ... ...\n", - "9995 9995 35.0 39.191919 1 POINT (39.19192 35.00000)\n", - "9996 9996 35.0 39.393939 1 POINT (39.39394 35.00000)\n", - "9997 9997 35.0 39.595960 1 POINT (39.59596 35.00000)\n", - "9998 9998 35.0 39.797980 1 POINT (39.79798 35.00000)\n", - "9999 9999 35.0 40.000000 1 POINT (40.00000 35.00000)\n", - "\n", - "[10000 rows x 5 columns]\n" + "data: (10000 entries)\n", + " value impf_TC geometry\n", + "0 0.533764 1 POINT (20.00000 15.00000)\n", + "1 0.995993 1 POINT (20.20202 15.00000)\n", + "2 0.603523 1 POINT (20.40404 15.00000)\n", + "3 0.754253 1 POINT (20.60606 15.00000)\n", + "9996 0.069044 1 POINT (39.39394 35.00000)\n", + "9997 0.116560 1 POINT (39.59596 35.00000)\n", + "9998 0.239856 1 POINT (39.79798 35.00000)\n", + "9999 0.099568 1 POINT (40.00000 35.00000)\n" ] } ], "source": [ "# let's have a look at the Exposures instance we created!\n", - "print(\"\\n\" + \"exp looks like:\")\n", "print(exp)" ] }, @@ -262,28 +395,14 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "World is a GeoDataFrame: \n", - "World looks like:\n", - " name geometry\n", - "0 Vatican City POINT (12.45339 41.90328)\n", - "1 San Marino POINT (12.44177 43.93610)\n", - "2 Vaduz POINT (9.51667 47.13372)\n", - "3 Lobamba POINT (31.20000 -26.46667)\n", - "4 Luxembourg POINT (6.13000 49.61166)\n" - ] - }, { "name": "stderr", "output_type": "stream", "text": [ - "/var/folders/y5/t1z41tgj7dv50sm2_29dn8740000gp/T/ipykernel_16894/4205155986.py:6: FutureWarning: The geopandas.dataset module is deprecated and will be removed in GeoPandas 1.0. You can get the original 'naturalearth_cities' data from https://www.naturalearthdata.com/downloads/110m-cultural-vectors/.\n", + "C:\\Users\\me\\AppData\\Local\\Temp\\ipykernel_31104\\2272990317.py:6: FutureWarning: The geopandas.dataset module is deprecated and will be removed in GeoPandas 1.0. 
You can get the original 'naturalearth_cities' data from https://www.naturalearthdata.com/downloads/110m-cultural-vectors/.\n", " world = gpd.read_file(gpd.datasets.get_path('naturalearth_cities'))\n" ] } @@ -294,54 +413,66 @@ "from climada.entity import Exposures\n", "\n", "# Read spatial info from an external file into GeoDataFrame\n", - "world = gpd.read_file(gpd.datasets.get_path(\"naturalearth_cities\"))\n", - "print(\"World is a GeoDataFrame:\", str(type(world)))\n", - "print(\"World looks like:\")\n", - "print(world.head())" + "world = gpd.read_file(gpd.datasets.get_path(\"naturalearth_cities\"))" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 27, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\n", - "exp_gpd is an Exposures: \n", - "2024-04-12 14:41:09,131 - climada.entity.exposures.base - INFO - Setting latitude and longitude attributes.\n" + "description: None\n", + "ref_year: 2018\n", + "value_unit: USD\n", + "crs: EPSG:4326\n", + "data: (243 entries)\n", + " name value geometry\n", + "0 Vatican City 0.876947 POINT (12.45339 41.90328)\n", + "1 San Marino 0.895454 POINT (12.44177 43.93610)\n", + "2 Vaduz 0.373366 POINT (9.51667 47.13372)\n", + "3 Lobamba 0.422729 POINT (31.20000 -26.46667)\n", + "239 São Paulo 0.913955 POINT (-46.62697 -23.55673)\n", + "240 Sydney 0.514479 POINT (151.21255 -33.87137)\n", + "241 Singapore 0.830635 POINT (103.85387 1.29498)\n", + "242 Hong Kong 0.764571 POINT (114.18306 22.30693)\n" ] } ], "source": [ "# Generate Exposures: value, latitude and longitude for each exposure entry.\n", + "world[\"value\"] = np.arange(n_exp)\n", "# Convert GeoDataFrame into Exposure instance\n", "exp_gpd = Exposures(world)\n", - "print(\"\\n\" + \"exp_gpd is an Exposures:\", str(type(exp_gpd)))\n", - "# add random values to entries\n", - "exp_gpd.gdf[\"value\"] = np.arange(world.shape[0])\n", - "# set latitude and longitude attributes from geometry\n", - "exp_gpd.set_lat_lon()" + "print(exp_gpd)" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 28, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\n", - "check method logs:\n", - "2024-04-12 14:41:24,338 - climada.entity.exposures.base - INFO - category_id not set.\n", - "2024-04-12 14:41:24,341 - climada.entity.exposures.base - INFO - cover not set.\n", - "2024-04-12 14:41:24,343 - climada.entity.exposures.base - INFO - deductible not set.\n", - "2024-04-12 14:41:24,344 - climada.entity.exposures.base - INFO - region_id not set.\n", - "2024-04-12 14:41:24,344 - climada.entity.exposures.base - INFO - centr_ not set.\n" + "description: None\n", + "ref_year: 2018\n", + "value_unit: USD\n", + "crs: EPSG:4326\n", + "data: (243 entries)\n", + " name value geometry impf_TC\n", + "0 Vatican City 0.876947 POINT (12.45339 41.90328) 1\n", + "1 San Marino 0.895454 POINT (12.44177 43.93610) 1\n", + "2 Vaduz 0.373366 POINT (9.51667 47.13372) 1\n", + "3 Lobamba 0.422729 POINT (31.20000 -26.46667) 1\n", + "239 São Paulo 0.913955 POINT (-46.62697 -23.55673) 1\n", + "240 Sydney 0.514479 POINT (151.21255 -33.87137) 1\n", + "241 Singapore 0.830635 POINT (103.85387 1.29498) 1\n", + "242 Hong Kong 0.764571 POINT (114.18306 22.30693) 1\n" ] } ], @@ -350,73 +481,7 @@ "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", "# the same impact function: the one that has ID '1':\n", "# Of course, this will only be relevant at later steps during impact 
calculations.\n", - "exp_gpd.gdf[\"impf_TC\"] = np.ones(world.shape[0], int)\n", - "print(\"\\n\" + \"check method logs:\")\n", - "\n", - "# as always, run check method to assign meta-data and check for missing mandatory variables.\n", - "exp_gpd.check()" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\u001b[1;03;30;30mexp_gpd looks like:\u001b[0m\n", - "ref_year: 2018\n", - "value_unit: USD\n", - "meta: {'crs': \n", - "Name: WGS 84\n", - "Axis Info [ellipsoidal]:\n", - "- Lat[north]: Geodetic latitude (degree)\n", - "- Lon[east]: Geodetic longitude (degree)\n", - "Area of Use:\n", - "- name: World.\n", - "- bounds: (-180.0, -90.0, 180.0, 90.0)\n", - "Datum: World Geodetic System 1984\n", - "- Ellipsoid: WGS 84\n", - "- Prime Meridian: Greenwich\n", - "}\n", - "crs: epsg:4326\n", - "data:\n", - " name geometry value latitude longitude \\\n", - "0 Vatican City POINT (12.45339 41.90328) 0 41.903282 12.453387 \n", - "1 San Marino POINT (12.44177 43.93610) 1 43.936096 12.441770 \n", - "2 Vaduz POINT (9.51667 47.13372) 2 47.133724 9.516669 \n", - "3 Luxembourg POINT (6.13000 49.61166) 3 49.611660 6.130003 \n", - "4 Palikir POINT (158.14997 6.91664) 4 6.916644 158.149974 \n", - ".. ... ... ... ... ... \n", - "197 Cairo POINT (31.24802 30.05191) 197 30.051906 31.248022 \n", - "198 Tokyo POINT (139.74946 35.68696) 198 35.686963 139.749462 \n", - "199 Paris POINT (2.33139 48.86864) 199 48.868639 2.331389 \n", - "200 Santiago POINT (-70.66899 -33.44807) 200 -33.448068 -70.668987 \n", - "201 Singapore POINT (103.85387 1.29498) 201 1.294979 103.853875 \n", - "\n", - " impf_TC \n", - "0 1 \n", - "1 1 \n", - "2 1 \n", - "3 1 \n", - "4 1 \n", - ".. ... 
\n", - "197 1 \n", - "198 1 \n", - "199 1 \n", - "200 1 \n", - "201 1 \n", - "\n", - "[202 rows x 6 columns]\n" - ] - } - ], - "source": [ - "# let's have a look at the Exposures instance we created!\n", - "print(\"\\n\" + \"\\x1b[1;03;30;30m\" + \"exp_gpd looks like:\" + \"\\x1b[0m\")\n", + "exp_gpd.data[\"impf_TC\"] = np.ones(world.shape[0], int)\n", "print(exp_gpd)" ] }, @@ -429,7 +494,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 30, "metadata": {}, "outputs": [ { @@ -462,73 +527,197 @@ " \n", " \n", " name\n", - " geometry\n", " value\n", - " latitude\n", - " longitude\n", " impf_TC\n", + " geometry\n", " \n", " \n", " \n", " \n", " 11\n", " Tarawa\n", - " POINT (173.01757 1.33819)\n", - " 11\n", - " 1.338188\n", - " 173.017571\n", + " 0.107688\n", " 1\n", + " POINT (173.01757 1.33819)\n", " \n", " \n", " 15\n", " Kigali\n", - " POINT (30.05859 -1.95164)\n", - " 15\n", - " -1.951644\n", - " 30.058586\n", + " 0.218687\n", " 1\n", + " POINT (30.05859 -1.95164)\n", " \n", " \n", " 17\n", " Juba\n", - " POINT (31.58003 4.82998)\n", - " 17\n", - " 4.829975\n", - " 31.580026\n", + " 0.763743\n", " 1\n", + " POINT (31.58003 4.82998)\n", " \n", " \n", " 31\n", " Putrajaya\n", - " POINT (101.69504 2.93252)\n", - " 31\n", - " 2.932515\n", - " 101.695037\n", + " 0.533607\n", " 1\n", + " POINT (101.69504 2.93252)\n", " \n", " \n", " 37\n", " Bujumbura\n", + " 0.127881\n", + " 1\n", " POINT (29.36001 -3.37609)\n", - " 37\n", - " -3.376087\n", - " 29.360006\n", + " \n", + " \n", + " 58\n", + " Kampala\n", + " 0.079019\n", + " 1\n", + " POINT (32.58138 0.31860)\n", + " \n", + " \n", + " 75\n", + " Mogadishu\n", + " 0.696766\n", + " 1\n", + " POINT (45.36473 2.06863)\n", + " \n", + " \n", + " 88\n", + " Quito\n", + " 0.212070\n", + " 1\n", + " POINT (-78.50200 -0.21304)\n", + " \n", + " \n", + " 93\n", + " Malabo\n", + " 0.088459\n", + " 1\n", + " POINT (8.78328 3.75002)\n", + " \n", + " \n", + " 99\n", + " Libreville\n", + " 0.929139\n", + " 1\n", + " POINT (9.45796 0.38539)\n", + " \n", + " \n", + " 108\n", + " Brazzaville\n", + " 0.795766\n", + " 1\n", + " POINT (15.28274 -4.25724)\n", + " \n", + " \n", + " 113\n", + " Bandar Seri Begawan\n", + " 0.655856\n", + " 1\n", + " POINT (114.93328 4.88333)\n", + " \n", + " \n", + " 116\n", + " Bangui\n", + " 0.398002\n", + " 1\n", + " POINT (18.55829 4.36664)\n", + " \n", + " \n", + " 117\n", + " Yaoundé\n", + " 0.240599\n", + " 1\n", + " POINT (11.51470 3.86865)\n", + " \n", + " \n", + " 134\n", + " Victoria\n", + " 0.956208\n", + " 1\n", + " POINT (55.44999 -4.61663)\n", + " \n", + " \n", + " 135\n", + " São Tomé\n", + " 0.726704\n", + " 1\n", + " POINT (6.72965 0.33747)\n", + " \n", + " \n", + " 138\n", + " Malé\n", + " 0.996017\n", + " 1\n", + " POINT (73.50890 4.17204)\n", + " \n", + " \n", + " 158\n", + " Kuala Lumpur\n", + " 0.880473\n", + " 1\n", + " POINT (101.68870 3.13980)\n", + " \n", + " \n", + " 201\n", + " Kinshasa\n", + " 0.074387\n", + " 1\n", + " POINT (15.31303 -4.32778)\n", + " \n", + " \n", + " 228\n", + " Nairobi\n", + " 0.297170\n", + " 1\n", + " POINT (36.81471 -1.28140)\n", + " \n", + " \n", + " 230\n", + " Bogota\n", + " 0.420891\n", " 1\n", + " POINT (-74.08529 4.59837)\n", + " \n", + " \n", + " 241\n", + " Singapore\n", + " 0.830635\n", + " 1\n", + " POINT (103.85387 1.29498)\n", " \n", " \n", "\n", "" ], "text/plain": [ - " name geometry value latitude longitude impf_TC\n", - "11 Tarawa POINT (173.01757 1.33819) 11 1.338188 173.017571 1\n", - "15 Kigali POINT (30.05859 -1.95164) 15 -1.951644 
30.058586 1\n", - "17 Juba POINT (31.58003 4.82998) 17 4.829975 31.580026 1\n", - "31 Putrajaya POINT (101.69504 2.93252) 31 2.932515 101.695037 1\n", - "37 Bujumbura POINT (29.36001 -3.37609) 37 -3.376087 29.360006 1" + " name value impf_TC geometry\n", + "11 Tarawa 0.107688 1 POINT (173.01757 1.33819)\n", + "15 Kigali 0.218687 1 POINT (30.05859 -1.95164)\n", + "17 Juba 0.763743 1 POINT (31.58003 4.82998)\n", + "31 Putrajaya 0.533607 1 POINT (101.69504 2.93252)\n", + "37 Bujumbura 0.127881 1 POINT (29.36001 -3.37609)\n", + "58 Kampala 0.079019 1 POINT (32.58138 0.31860)\n", + "75 Mogadishu 0.696766 1 POINT (45.36473 2.06863)\n", + "88 Quito 0.212070 1 POINT (-78.50200 -0.21304)\n", + "93 Malabo 0.088459 1 POINT (8.78328 3.75002)\n", + "99 Libreville 0.929139 1 POINT (9.45796 0.38539)\n", + "108 Brazzaville 0.795766 1 POINT (15.28274 -4.25724)\n", + "113 Bandar Seri Begawan 0.655856 1 POINT (114.93328 4.88333)\n", + "116 Bangui 0.398002 1 POINT (18.55829 4.36664)\n", + "117 Yaoundé 0.240599 1 POINT (11.51470 3.86865)\n", + "134 Victoria 0.956208 1 POINT (55.44999 -4.61663)\n", + "135 São Tomé 0.726704 1 POINT (6.72965 0.33747)\n", + "138 Malé 0.996017 1 POINT (73.50890 4.17204)\n", + "158 Kuala Lumpur 0.880473 1 POINT (101.68870 3.13980)\n", + "201 Kinshasa 0.074387 1 POINT (15.31303 -4.32778)\n", + "228 Nairobi 0.297170 1 POINT (36.81471 -1.28140)\n", + "230 Bogota 0.420891 1 POINT (-74.08529 4.59837)\n", + "241 Singapore 0.830635 1 POINT (103.85387 1.29498)" ] }, - "execution_count": 11, + "execution_count": 30, "metadata": {}, "output_type": "execute_result" } @@ -536,15 +725,15 @@ "source": [ "# Example 1: extract data in a region: latitudes between -5 and 5\n", "sel_exp = exp_gpd.copy() # to keep the original exp_gpd Exposures data\n", - "sel_exp.gdf = sel_exp.gdf.cx[:, -5:5]\n", + "sel_exp.data = sel_exp.data.cx[:, -5:5]\n", "\n", "print(\"\\n\" + \"sel_exp contains a subset of the original data\")\n", - "sel_exp.gdf.head()" + "sel_exp.data" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 29, "metadata": {}, "outputs": [ { @@ -577,83 +766,69 @@ " \n", " \n", " name\n", - " geometry\n", " value\n", - " latitude\n", - " longitude\n", " impf_TC\n", + " geometry\n", " \n", " \n", " \n", " \n", " 36\n", " Porto-Novo\n", - " POINT (2.61663 6.48331)\n", - " 36\n", - " 6.483311\n", - " 2.616626\n", + " 0.573619\n", " 1\n", + " POINT (2.61663 6.48331)\n", " \n", " \n", " 46\n", " Lomé\n", - " POINT (1.22081 6.13388)\n", - " 46\n", - " 6.133883\n", - " 1.220811\n", + " 0.176892\n", " 1\n", + " POINT (1.22081 6.13388)\n", " \n", " \n", " 93\n", " Malabo\n", - " POINT (8.78328 3.75002)\n", - " 93\n", - " 3.750015\n", - " 8.783278\n", + " 0.088459\n", " 1\n", + " POINT (8.78328 3.75002)\n", " \n", " \n", " 123\n", " Cotonou\n", - " POINT (2.40435 6.36298)\n", - " 123\n", - " 6.362980\n", - " 2.404355\n", + " 0.441703\n", " 1\n", + " POINT (2.40435 6.36298)\n", " \n", " \n", " 135\n", " São Tomé\n", - " POINT (6.72965 0.33747)\n", - " 135\n", - " 0.337466\n", - " 6.729650\n", + " 0.726704\n", " 1\n", + " POINT (6.72965 0.33747)\n", " \n", " \n", " 225\n", " Lagos\n", - " POINT (3.38959 6.44521)\n", - " 225\n", - " 6.445208\n", - " 3.389585\n", + " 0.990135\n", " 1\n", + " POINT (3.38959 6.44521)\n", " \n", " \n", "\n", "" ], "text/plain": [ - " name geometry value latitude longitude impf_TC\n", - "36 Porto-Novo POINT (2.61663 6.48331) 36 6.483311 2.616626 1\n", - "46 Lomé POINT (1.22081 6.13388) 46 6.133883 1.220811 1\n", - "93 Malabo POINT (8.78328 3.75002) 93 3.750015 
8.783278 1\n", - "123 Cotonou POINT (2.40435 6.36298) 123 6.362980 2.404355 1\n", - "135 São Tomé POINT (6.72965 0.33747) 135 0.337466 6.729650 1\n", - "225 Lagos POINT (3.38959 6.44521) 225 6.445208 3.389585 1" + " name value impf_TC geometry\n", + "36 Porto-Novo 0.573619 1 POINT (2.61663 6.48331)\n", + "46 Lomé 0.176892 1 POINT (1.22081 6.13388)\n", + "93 Malabo 0.088459 1 POINT (8.78328 3.75002)\n", + "123 Cotonou 0.441703 1 POINT (2.40435 6.36298)\n", + "135 São Tomé 0.726704 1 POINT (6.72965 0.33747)\n", + "225 Lagos 0.990135 1 POINT (3.38959 6.44521)" ] }, - "execution_count": 12, + "execution_count": 29, "metadata": {}, "output_type": "execute_result" } @@ -665,23 +840,22 @@ "sel_polygon = exp_gpd.copy() # to keep the original exp_gpd Exposures data\n", "\n", "poly = Polygon([(0, -10), (0, 10), (10, 5)])\n", - "sel_polygon.gdf = sel_polygon.gdf[sel_polygon.gdf.intersects(poly)]\n", + "sel_polygon.data = sel_polygon.gdf[sel_polygon.gdf.intersects(poly)]\n", "\n", "# Let's have a look. Again, the sub-selection is a GeoDataFrame!\n", "print(\"\\n\" + \"sel_exp contains a subset of the original data\")\n", - "sel_polygon.gdf" + "sel_polygon.data" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 31, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "2024-04-12 14:42:09,423 - climada.entity.exposures.base - INFO - Setting latitude and longitude attributes.\n", "\n", "the crs has changed to EPSG:3395\n", "the values for latitude and longitude are now according to the new coordinate system: \n" @@ -709,91 +883,69 @@ " \n", " \n", " name\n", - " geometry\n", " value\n", - " latitude\n", - " longitude\n", " impf_TC\n", + " geometry\n", " \n", " \n", " \n", " \n", " 36\n", " Porto-Novo\n", - " POINT (291281.418 718442.692)\n", - " 36\n", - " 718442.691819\n", - " 291281.418257\n", + " 0.573619\n", " 1\n", + " POINT (291281.418 718442.692)\n", " \n", " \n", " 46\n", " Lomé\n", - " POINT (135900.092 679566.331)\n", - " 46\n", - " 679566.330586\n", - " 135900.092271\n", + " 0.176892\n", " 1\n", + " POINT (135900.092 679566.331)\n", " \n", " \n", " 93\n", " Malabo\n", - " POINT (977749.979 414955.553)\n", - " 93\n", - " 414955.553292\n", - " 977749.978796\n", + " 0.088459\n", " 1\n", + " POINT (977749.979 414955.553)\n", " \n", " \n", " 123\n", " Cotonou\n", - " POINT (267651.551 705052.049)\n", - " 123\n", - " 705052.049006\n", - " 267651.551008\n", + " 0.441703\n", " 1\n", + " POINT (267651.551 705052.049)\n", " \n", " \n", " 135\n", " São Tomé\n", - " POINT (749141.190 37315.322)\n", - " 135\n", - " 37315.322206\n", - " 749141.189651\n", + " 0.726704\n", " 1\n", + " POINT (749141.190 37315.322)\n", " \n", " \n", " 225\n", " Lagos\n", - " POINT (377326.898 714202.107)\n", - " 225\n", - " 714202.106826\n", - " 377326.898464\n", + " 0.990135\n", " 1\n", + " POINT (377326.898 714202.107)\n", " \n", " \n", "\n", "" ], "text/plain": [ - " name geometry value latitude \\\n", - "36 Porto-Novo POINT (291281.418 718442.692) 36 718442.691819 \n", - "46 Lomé POINT (135900.092 679566.331) 46 679566.330586 \n", - "93 Malabo POINT (977749.979 414955.553) 93 414955.553292 \n", - "123 Cotonou POINT (267651.551 705052.049) 123 705052.049006 \n", - "135 São Tomé POINT (749141.190 37315.322) 135 37315.322206 \n", - "225 Lagos POINT (377326.898 714202.107) 225 714202.106826 \n", - "\n", - " longitude impf_TC \n", - "36 291281.418257 1 \n", - "46 135900.092271 1 \n", - "93 977749.978796 1 \n", - "123 267651.551008 1 \n", - "135 749141.189651 1 \n", - "225 
377326.898464 1 " + " name value impf_TC geometry\n", + "36 Porto-Novo 0.573619 1 POINT (291281.418 718442.692)\n", + "46 Lomé 0.176892 1 POINT (135900.092 679566.331)\n", + "93 Malabo 0.088459 1 POINT (977749.979 414955.553)\n", + "123 Cotonou 0.441703 1 POINT (267651.551 705052.049)\n", + "135 São Tomé 0.726704 1 POINT (749141.190 37315.322)\n", + "225 Lagos 0.990135 1 POINT (377326.898 714202.107)" ] }, - "execution_count": 13, + "execution_count": 31, "metadata": {}, "output_type": "execute_result" } @@ -806,20 +958,20 @@ "print(\n", " \"the values for latitude and longitude are now according to the new coordinate system: \"\n", ")\n", - "sel_polygon.gdf" + "sel_polygon.data" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 35, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "exp_all type and number of rows: 25\n", - "number of unique rows: 23\n" + "exp_all type and number of rows: 28\n", + "number of unique rows: 26\n" ] }, { @@ -842,336 +994,111 @@ "\n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
[garbled HTML table output (removed lines): exp_all head with columns name, geometry, value, latitude, longitude, impf_TC; the same data follows in the text/plain rendering below]
\n", - "" - ], - "text/plain": [ - " name geometry value latitude \\\n", - "0 Lome POINT (135900.088 679566.334) 36 679566.333952 \n", - "1 Malabo POINT (977749.984 414955.551) 84 414955.550857 \n", - "2 Cotonou POINT (280307.458 709388.810) 113 709388.810160 \n", - "3 Sao Tome POINT (749550.327 36865.909) 125 36865.908682 \n", - "4 Tarawa POINT (19260227.883 147982.749) 9 147982.748978 \n", - "\n", - " longitude impf_TC \n", - "0 1.359001e+05 1 \n", - "1 9.777500e+05 1 \n", - "2 2.803075e+05 1 \n", - "3 7.495503e+05 1 \n", - "4 1.926023e+07 1 " - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Example 4: concatenate exposures\n", - "exp_all = Exposures.concat([sel_polygon, sel_exp.to_crs(epsg=3395)])\n", - "\n", - "# the output is of type Exposures\n", - "print(\"exp_all type and number of rows:\", type(exp_all), exp_all.gdf.shape[0])\n", - "print(\"number of unique rows:\", exp_all.gdf.drop_duplicates().shape[0])\n", - "\n", - "# NaNs will appear in the missing values\n", - "exp_all.gdf.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "### Exposures of any file type supported by Geopandas and Pandas\n", - "\n", - "Geopandas can read almost any vector-based spatial data format including ESRI shapefile, GeoJSON files and more, see [readers geopandas](http://geopandas.org/io.html). Pandas supports formats such as csv, html or sql; see [readers pandas](https://pandas.pydata.org/pandas-docs/stable/io.html). Using the corresponding readers, `DataFrame` and `GeoDataFrame` can be filled and provided to `Exposures` following the previous examples." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "### Exposures from an excel file\n", - "\n", - "If you manually collect exposure data, Excel may be your preferred option. \n", - "In this case, it is easiest if you format your data according to the structure provided in the template `climada_python/climada/data/system/entity_template.xlsx`, in the sheet `assets`." - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "exp_templ is a DataFrame: \n", - "exp_templ looks like:\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", + " \n", + " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", " \n", - " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", - " \n", + " \n", " \n", " \n", "
latitudelongitudevaluedeductiblecoverregion_idcategory_idimpf_TCcentr_TCimpf_FLcentr_FL
026.933899-80.1287991.392750e+1001.392750e+10111111
126.957203-80.0982841.259606e+1001.259606e+10111212namevalueimpf_TCgeometry
226.783846-80.7489471.259606e+1001.259606e+10111323Kuala Lumpur0.88047313POINT (11319934.225 347356.996)
326.645524-80.5507041.259606e+1001.259606e+1024Kinshasa0.074387111414POINT (1704638.257 -479002.730)
426.897796-80.5969291.259606e+1001.259606e+10125Nairobi0.2971701POINT (4098194.882 -141701.948)
26Bogota0.42089115POINT (-8247136.736 509015.405)
27Singapore0.83063515POINT (11560960.460 143203.754)
\n", "
" ], "text/plain": [ - " latitude longitude value deductible cover region_id \\\n", - "0 26.933899 -80.128799 1.392750e+10 0 1.392750e+10 1 \n", - "1 26.957203 -80.098284 1.259606e+10 0 1.259606e+10 1 \n", - "2 26.783846 -80.748947 1.259606e+10 0 1.259606e+10 1 \n", - "3 26.645524 -80.550704 1.259606e+10 0 1.259606e+10 1 \n", - "4 26.897796 -80.596929 1.259606e+10 0 1.259606e+10 1 \n", - "\n", - " category_id impf_TC centr_TC impf_FL centr_FL \n", - "0 1 1 1 1 1 \n", - "1 1 1 2 1 2 \n", - "2 1 1 3 1 3 \n", - "3 1 1 4 1 4 \n", - "4 1 1 5 1 5 " + " name value impf_TC geometry\n", + "23 Kuala Lumpur 0.880473 1 POINT (11319934.225 347356.996)\n", + "24 Kinshasa 0.074387 1 POINT (1704638.257 -479002.730)\n", + "25 Nairobi 0.297170 1 POINT (4098194.882 -141701.948)\n", + "26 Bogota 0.420891 1 POINT (-8247136.736 509015.405)\n", + "27 Singapore 0.830635 1 POINT (11560960.460 143203.754)" ] }, - "execution_count": 15, + "execution_count": 35, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "import pandas as pd\n", - "from climada.util.constants import ENT_TEMPLATE_XLS\n", - "from climada.entity import Exposures\n", + "# Example 4: concatenate exposures\n", + "exp_all = Exposures.concat([sel_polygon, sel_exp.to_crs(epsg=3395)])\n", "\n", - "# Read your Excel file into a pandas DataFrame (we will use the template example for this demonstration):\n", - "file_name = ENT_TEMPLATE_XLS\n", - "exp_templ = pd.read_excel(file_name)\n", + "# the output is of type Exposures\n", + "print(\"exp_all type and number of rows:\", type(exp_all), exp_all.gdf.shape[0])\n", + "print(\"number of unique rows:\", exp_all.gdf.drop_duplicates().shape[0])\n", "\n", - "# Let's have a look at the data:\n", - "print(\"exp_templ is a DataFrame:\", str(type(exp_templ)))\n", - "print(\"exp_templ looks like:\")\n", - "exp_templ.head()" + "# NaNs will appear in the missing values\n", + "exp_all.data.tail()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "As we can see, the general structure is the same as always: the exposure has `latitude`, `longitude` and `value` columns. Further, this example specified several impact function ids: some for Tropical Cyclones (`impf_TC`), and some for Floods (`impf_FL`). It also provides some meta-info (`region_id`, `category_id`) and insurance info relevant to the impact calculation in later steps (`cover`, `deductible`)." + "\n", + "### Exposures of any file type supported by Geopandas and Pandas\n", + "\n", + "Geopandas can read almost any vector-based spatial data format including ESRI shapefile, GeoJSON files and more, see [readers geopandas](http://geopandas.org/io.html). Pandas supports formats such as csv, html or sql; see [readers pandas](https://pandas.pydata.org/pandas-docs/stable/io.html). Using the corresponding readers, `DataFrame` and `GeoDataFrame` can be filled and provided to `Exposures` following the previous examples." 
] }, { - "cell_type": "code", - "execution_count": 17, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "exp_templ is now an Exposures: \n", - "\n", - "set_geometry logs:\n", - "2024-04-12 14:44:29,822 - climada.util.coordinates - INFO - Setting geometry points.\n", - "\n", - "check exp_templ:\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/lseverino/Documents/PhD/workspace/climada_python/climada/util/coordinates.py:2755: FutureWarning: You are adding a column named 'geometry' to a GeoDataFrame constructed without an active geometry column. Currently, this automatically sets the active geometry column to 'geometry' but in the future that will no longer happen. Instead, either provide geometry to the GeoDataFrame constructor (GeoDataFrame(... geometry=GeoSeries()) or use `set_geometry('geometry')` to explicitly set the active geometry column.\n", - " df_val['geometry'] = gpd.GeoSeries(\n" - ] - } - ], "source": [ - "# Generate an Exposures instance from the dataframe.\n", - "exp_templ = Exposures(exp_templ)\n", - "print(\"\\n\" + \"exp_templ is now an Exposures:\", str(type(exp_templ)))\n", "\n", - "# set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude\n", - "print(\"\\n\" + \"set_geometry logs:\")\n", - "exp_templ.set_geometry_points()\n", - "# as always, run check method to include metadata and check for missing mandatory parameters\n", + "### Exposures from an excel file\n", "\n", - "print(\"\\n\" + \"check exp_templ:\")\n", - "exp_templ.check()" + "If you manually collect exposure data, Excel may be your preferred option. \n", + "In this case, it is easiest if you format your data according to the structure provided in the template `climada_python/climada/data/system/entity_template.xlsx`, in the sheet `assets`." 
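The same steps carry over to your own data; a sketch, where `my_exposure.xlsx` is a hypothetical file laid out like the template:

```python
import pandas as pd

from climada.entity import Exposures

# hypothetical user file following entity_template.xlsx, sheet "assets"
own_df = pd.read_excel("my_exposure.xlsx", sheet_name="assets")
own_exp = Exposures(own_df)
own_exp.check()  # raises on missing mandatory columns, logs the optional ones
```

The next cells demonstrate these steps with the template file shipped with CLIMADA.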
] }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 42, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\n", - "exp_templ.gdf looks like:\n" + "exp_templ is a DataFrame: \n", + "exp_templ looks like:\n" ] }, { @@ -1206,7 +1133,6 @@ " centr_TC\n", " impf_FL\n", " centr_FL\n", - " geometry\n", " \n", " \n", " \n", @@ -1223,7 +1149,6 @@ " 1\n", " 1\n", " 1\n", - " POINT (-80.12880 26.93390)\n", " \n", " \n", " 1\n", @@ -1238,7 +1163,6 @@ " 2\n", " 1\n", " 2\n", - " POINT (-80.09828 26.95720)\n", " \n", " \n", " 2\n", @@ -1253,7 +1177,6 @@ " 3\n", " 1\n", " 3\n", - " POINT (-80.74895 26.78385)\n", " \n", " \n", " 3\n", @@ -1268,7 +1191,6 @@ " 4\n", " 1\n", " 4\n", - " POINT (-80.55070 26.64552)\n", " \n", " \n", " 4\n", @@ -1283,7 +1205,6 @@ " 5\n", " 1\n", " 5\n", - " POINT (-80.59693 26.89780)\n", " \n", " \n", "\n", @@ -1297,191 +1218,157 @@ "3 26.645524 -80.550704 1.259606e+10 0 1.259606e+10 1 \n", "4 26.897796 -80.596929 1.259606e+10 0 1.259606e+10 1 \n", "\n", - " category_id impf_TC centr_TC impf_FL centr_FL \\\n", - "0 1 1 1 1 1 \n", - "1 1 1 2 1 2 \n", - "2 1 1 3 1 3 \n", - "3 1 1 4 1 4 \n", - "4 1 1 5 1 5 \n", - "\n", - " geometry \n", - "0 POINT (-80.12880 26.93390) \n", - "1 POINT (-80.09828 26.95720) \n", - "2 POINT (-80.74895 26.78385) \n", - "3 POINT (-80.55070 26.64552) \n", - "4 POINT (-80.59693 26.89780) " + " category_id impf_TC centr_TC impf_FL centr_FL \n", + "0 1 1 1 1 1 \n", + "1 1 1 2 1 2 \n", + "2 1 1 3 1 3 \n", + "3 1 1 4 1 4 \n", + "4 1 1 5 1 5 " ] }, - "execution_count": 18, + "execution_count": 42, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# Let's have a look at our Exposures instance!\n", - "print(\"\\n\" + \"exp_templ.gdf looks like:\")\n", - "exp_templ.gdf.head()" + "import pandas as pd\n", + "from climada.util.constants import ENT_TEMPLATE_XLS\n", + "from climada.entity import Exposures\n", + "\n", + "# Read your Excel file into a pandas DataFrame (we will use the template example for this demonstration):\n", + "file_name = ENT_TEMPLATE_XLS\n", + "exp_templ = pd.read_excel(file_name)\n", + "\n", + "# Let's have a look at the data:\n", + "print(\"exp_templ is a DataFrame:\", str(type(exp_templ)))\n", + "print(\"exp_templ looks like:\")\n", + "exp_templ.head()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Exposures from a raster file\n", - "\n", - "Last but not least, you may have your exposure data stored in a raster file. Raster data may be read in from any file-type supported by [rasterio](https://rasterio.readthedocs.io/en/stable/). " + "As we can see, the general structure is the same as always: the exposure has `latitude`, `longitude` and `value` columns. Further, this example specified several impact function ids: some for Tropical Cyclones (`impf_TC`), and some for Floods (`impf_FL`). It also provides some meta-info (`region_id`, `category_id`) and insurance info relevant to the impact calculation in later steps (`cover`, `deductible`)." 
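Note that impact function ids live in one `impf_<HAZ>` column per hazard type, so further hazards can be added alongside the existing ones. A sketch continuing from `exp_templ` above; windstorm (`WS`) and the id 2 are hypothetical choices for illustration:

```python
# tag every asset with a windstorm impact function id, next to the
# existing impf_TC (tropical cyclone) and impf_FL (flood) columns
exp_templ["impf_WS"] = 2  # hypothetical hazard type and function id
```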
] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 43, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "2024-04-12 14:44:52,508 - climada.util.coordinates - INFO - Reading /Users/lseverino/climada/demo/data/SC22000_VE__M1.grd.gz\n" + "\n", + "exp_templ is now an Exposures: description: None\n", + "ref_year: 2018\n", + "value_unit: USD\n", + "crs: EPSG:4326\n", + "data: (24 entries)\n", + " value deductible cover region_id category_id impf_TC \\\n", + "0 1.392750e+10 0 1.392750e+10 1 1 1 \n", + "1 1.259606e+10 0 1.259606e+10 1 1 1 \n", + "2 1.259606e+10 0 1.259606e+10 1 1 1 \n", + "3 1.259606e+10 0 1.259606e+10 1 1 1 \n", + "20 1.259760e+10 0 1.259760e+10 1 1 1 \n", + "21 1.281454e+10 0 1.281454e+10 1 1 1 \n", + "22 1.262176e+10 0 1.262176e+10 1 1 1 \n", + "23 1.259754e+10 0 1.259754e+10 1 1 1 \n", + "\n", + " centr_TC impf_FL centr_FL geometry \n", + "0 1 1 1 POINT (-80.12880 26.93390) \n", + "1 2 1 2 POINT (-80.09828 26.95720) \n", + "2 3 1 3 POINT (-80.74895 26.78385) \n", + "3 4 1 4 POINT (-80.55070 26.64552) \n", + "20 21 1 21 POINT (-80.06858 26.71255) \n", + "21 22 1 22 POINT (-80.09070 26.66490) \n", + "22 23 1 23 POINT (-80.12540 26.66470) \n", + "23 24 1 24 POINT (-80.15140 26.66315) \n" ] } ], "source": [ - "from rasterio.windows import Window\n", - "from climada.util.constants import HAZ_DEMO_FL\n", - "from climada.entity import Exposures\n", + "# Generate an Exposures instance from the dataframe.\n", + "exp_templ = Exposures(exp_templ)\n", + "print(\"\\n\" + \"exp_templ is now an Exposures:\", exp_templ)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Exposures from a raster file\n", "\n", - "# We take an example with a dummy raster file (HAZ_DEMO_FL), running the method set_from_raster directly loads the\n", - "# necessary info from the file into an Exposures instance.\n", - "exp_raster = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60))\n", - "# There are several keyword argument options that come with the set_from_raster method (such as\n", - "# specifying a window, if not the entire file should be read, or a bounding box. Check them out." + "Last but not least, you may have your exposure data stored in a raster file. Raster data may be read in from any file-type supported by [rasterio](https://rasterio.readthedocs.io/en/stable/). 
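Before the windowed example below, the plain call: a sketch with a hypothetical GeoTIFF `assets.tif`; omitting the `window` argument reads the full raster:

```python
from climada.entity import Exposures

# every raster cell becomes one exposure point carrying the cell's value
exp_full = Exposures.from_raster("assets.tif")  # hypothetical file
exp_full.check()
```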
" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "2024-04-12 14:44:53,848 - climada.entity.exposures.base - INFO - Setting impf_ to default impact functions ids 1.\n", - "2024-04-12 14:44:53,849 - climada.entity.exposures.base - INFO - category_id not set.\n", - "2024-04-12 14:44:53,850 - climada.entity.exposures.base - INFO - cover not set.\n", - "2024-04-12 14:44:53,850 - climada.entity.exposures.base - INFO - deductible not set.\n", - "2024-04-12 14:44:53,851 - climada.entity.exposures.base - INFO - geometry not set.\n", - "2024-04-12 14:44:53,851 - climada.entity.exposures.base - INFO - region_id not set.\n", - "2024-04-12 14:44:53,852 - climada.entity.exposures.base - INFO - centr_ not set.\n", - "Meta: {'driver': 'GSBG', 'dtype': 'float32', 'nodata': 1.701410009187828e+38, 'width': 50, 'height': 60, 'count': 1, 'crs': CRS.from_epsg(4326), 'transform': Affine(0.009000000000000341, 0.0, -69.2471495969998,\n", - " 0.0, -0.009000000000000341, 10.248220966978932)}\n" + "2024-10-04 17:19:03,632 - climada.util.coordinates - INFO - Reading C:\\Users\\me\\climada\\demo\\data\\SC22000_VE__M1.grd.gz\n" ] } ], "source": [ - "# As always, run the check method, such that metadata can be assigned and checked for missing mandatory parameters.\n", - "exp_raster.check()\n", - "print(\"Meta:\", exp_raster.meta)" + "from rasterio.windows import Window\n", + "from climada.util.constants import HAZ_DEMO_FL\n", + "from climada.entity import Exposures\n", + "\n", + "# We take an example with a dummy raster file (HAZ_DEMO_FL), running the method set_from_raster directly loads the\n", + "# necessary info from the file into an Exposures instance.\n", + "exp_raster = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60))\n", + "# There are several keyword argument options that come with the set_from_raster method (such as\n", + "# specifying a window, if not the entire file should be read, or a bounding box. Check them out." ] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 20, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\n", - "exp_raster looks like:\n" + "2024-10-04 17:19:03,725 - climada.util.coordinates - INFO - Raster from resolution 0.009000000000000341 to 0.009000000000000341.\n" ] }, { "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
longitudelatitudevalueimpf_
0-69.2426510.2437210.01
1-69.2336510.2437210.01
2-69.2246510.2437210.01
3-69.2156510.2437210.01
4-69.2066510.2437210.01
\n", - "
" - ], "text/plain": [ - " longitude latitude value impf_\n", - "0 -69.24265 10.243721 0.0 1\n", - "1 -69.23365 10.243721 0.0 1\n", - "2 -69.22465 10.243721 0.0 1\n", - "3 -69.21565 10.243721 0.0 1\n", - "4 -69.20665 10.243721 0.0 1" + "{'crs': \n", + " Name: WGS 84\n", + " Axis Info [ellipsoidal]:\n", + " - Lat[north]: Geodetic latitude (degree)\n", + " - Lon[east]: Geodetic longitude (degree)\n", + " Area of Use:\n", + " - name: World.\n", + " - bounds: (-180.0, -90.0, 180.0, 90.0)\n", + " Datum: World Geodetic System 1984 ensemble\n", + " - Ellipsoid: WGS 84\n", + " - Prime Meridian: Greenwich,\n", + " 'height': 60,\n", + " 'width': 50,\n", + " 'transform': Affine(0.009000000000000341, 0.0, -69.2471495969998,\n", + " 0.0, -0.009000000000000341, 10.248220966978932)}" ] }, - "execution_count": 21, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# Let's have a look at the Exposures instance!\n", - "print(\"\\n\" + \"exp_raster looks like:\")\n", - "exp_raster.gdf.head()" + "exp_raster.derive_raster()" ] }, { @@ -1499,14 +1386,46 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 21, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\n" + "2024-10-04 17:19:04,888 - climada.entity.exposures.base - INFO - Reading C:\\Users\\me\\climada\\demo\\data\\exp_demo_today.h5\n", + "description: None\n", + "ref_year: 2016\n", + "value_unit: USD\n", + "crs: EPSG:4326\n", + "data: (50 entries)\n", + " value impf_TC deductible cover category_id region_id \\\n", + "0 1.392750e+10 1 0.0 1.392750e+10 1 1.0 \n", + "1 1.259606e+10 1 0.0 1.259606e+10 1 1.0 \n", + "2 1.259606e+10 1 0.0 1.259606e+10 1 1.0 \n", + "3 1.259606e+10 1 0.0 1.259606e+10 1 1.0 \n", + "46 1.264524e+10 1 0.0 1.264524e+10 1 1.0 \n", + "47 1.281438e+10 1 0.0 1.281438e+10 1 1.0 \n", + "48 1.260291e+10 1 0.0 1.260291e+10 1 1.0 \n", + "49 1.262482e+10 1 0.0 1.262482e+10 1 1.0 \n", + "\n", + " geometry \n", + "0 POINT (-80.12880 26.93390) \n", + "1 POINT (-80.09828 26.95720) \n", + "2 POINT (-80.74895 26.78385) \n", + "3 POINT (-80.55070 26.64552) \n", + "46 POINT (-80.11640 26.34907) \n", + "47 POINT (-80.08385 26.34635) \n", + "48 POINT (-80.24130 26.34802) \n", + "49 POINT (-80.15886 26.34796) \n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "c:\\Users\\me\\miniconda3\\envs\\climada_env\\Lib\\pickle.py:1718: UserWarning: Unpickling a shapely <2.0 geometry object. Please save the pickle again; shapely 2.1 will not have this compatibility.\n", + " setstate(state)\n" ] } ], @@ -1516,17 +1435,7 @@ "from climada.util.constants import EXP_DEMO_H5\n", "\n", "exp_hdf5 = Exposures.from_hdf5(EXP_DEMO_H5)\n", - "exp_hdf5.check()\n", - "print(type(exp_hdf5))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Before you leave ...\n", - "\n", - "After defining an `Exposures` instance use always the `check()` method to see which attributes are missing. This method will raise an ERROR if `value`, `longitude` or `latitude` ar missing and an INFO messages for the the optional variables not set." + "print(exp_hdf5)" ] }, { @@ -1783,7 +1692,8 @@ "id": "5d078d09", "metadata": {}, "source": [ - "Finally, as with any Python object, use climada's save option to save it in pickle format. Note however, that pickle has a transient format and should be avoided when possible." + "Optionally use climada's save option to save it in pickle format. 
This allows fast to quickly restore the object in its current state and take up your work right were you left it the next time.\n", + "Note however, that pickle has a transient format and is not suitable for storing data persistently." ] }, { @@ -1798,51 +1708,6 @@ "# this generates a results folder in the current path and stores the output there\n", "save(\"exp_templ.pkl.p\", exp_templ) # creates results folder and stores there" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## Dask - improving performance for big exposure\n", - "\n", - "Dask is used in some methods of CLIMADA and can be activated easily by proving the scheduler." - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " value latitude longitude impf_TC\n", - "0 0 15.0 20.000000 1\n", - "1 1 15.0 20.202020 1\n", - "2 2 15.0 20.404040 1\n", - "3 3 15.0 20.606061 1\n", - "4 4 15.0 20.808081 1\n", - "CPU times: user 243 ms, sys: 116 ms, total: 359 ms\n", - "Wall time: 2.52 s\n", - " value latitude longitude impf_TC geometry\n", - "0 0 15.0 20.000000 1 POINT (20.00000 15.00000)\n", - "1 1 15.0 20.202020 1 POINT (20.20202 15.00000)\n", - "2 2 15.0 20.404040 1 POINT (20.40404 15.00000)\n", - "3 3 15.0 20.606061 1 POINT (20.60606 15.00000)\n", - "4 4 15.0 20.808081 1 POINT (20.80808 15.00000)\n" - ] - } - ], - "source": [ - "# set_geometry_points is expensive for big exposures\n", - "# for small amount of data, the execution time might be even greater when using dask\n", - "exp.gdf.drop(columns=[\"geometry\"], inplace=True)\n", - "print(exp.gdf.head())\n", - "%time exp.set_geometry_points(scheduler='processes')\n", - "print(exp.gdf.head())" - ] } ], "metadata": { @@ -1862,7 +1727,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.18" + "version": "3.11.9" }, "latex_envs": { "LaTeX_envs_menu_present": true, diff --git a/doc/tutorial/climada_entity_LitPop.ipynb b/doc/tutorial/climada_entity_LitPop.ipynb index 56c2d065a0..b41728bf2d 100644 --- a/doc/tutorial/climada_entity_LitPop.ipynb +++ b/doc/tutorial/climada_entity_LitPop.ipynb @@ -754,14 +754,13 @@ "ent_adm0 = LitPop.from_countries(\n", " \"CHE\", res_arcsec=120, fin_mode=\"gdp\", admin1_calc=False\n", ")\n", - "ent_adm0.set_geometry_points()\n", + "ent_adm0.check()\n", "\n", "ent_adm1 = LitPop.from_countries(\n", " \"CHE\", res_arcsec=120, fin_mode=\"gdp\", admin1_calc=True\n", ")\n", - "\n", - "ent_adm0.check()\n", "ent_adm1.check()\n", + "\n", "print(\"Done.\")" ] }, diff --git a/doc/tutorial/climada_hazard_TropCyclone.ipynb b/doc/tutorial/climada_hazard_TropCyclone.ipynb index 480d5c0b49..47df87fb75 100644 --- a/doc/tutorial/climada_hazard_TropCyclone.ipynb +++ b/doc/tutorial/climada_hazard_TropCyclone.ipynb @@ -1895,7 +1895,6 @@ "# construct centroids\n", "min_lat, max_lat, min_lon, max_lon = 16.99375, 21.95625, -72.48125, -61.66875\n", "cent = Centroids.from_pnt_bounds((min_lon, min_lat, max_lon, max_lat), res=0.12)\n", - "cent.check()\n", "cent.plot()\n", "\n", "# construct tropical cyclones\n", diff --git a/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb b/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb index b73180b385..84c9d37a39 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb @@ -754,8 +754,8 @@ ], "source": [ "point_idx = 1064\n", - "point_lat = 
exp_acel.gdf.latitude.values[point_idx]\n", - "point_lon = exp_acel.gdf.longitude.values[point_idx]\n", + "point_lat = exp_acel.latitude[point_idx]\n", + "point_lon = exp_acel.longitude[point_idx]\n", "point_eai = imp_acel.eai_exp[point_idx]\n", "print(\n", " \"Annual expected impact in {:.4f}° N {:.4f}° W is {:.0f} USD.\".format(\n", diff --git a/script/applications/eca_san_salvador/functions_ss.py b/script/applications/eca_san_salvador/functions_ss.py index 3d04785589..536fbc55dc 100755 --- a/script/applications/eca_san_salvador/functions_ss.py +++ b/script/applications/eca_san_salvador/functions_ss.py @@ -62,7 +62,6 @@ def plot_salvador_ma(): def load_entity(): ent_file = "FL_entity_Acelhuate_houses.xlsx" ent = Entity.from_excel(ent_file) - ent.exposures.set_geometry_points() ent.check() return ent @@ -277,7 +276,6 @@ def load_accounting(): def generate_plots_risk(): fig_ma = plot_salvador_ma() ent = load_entity() - ent.exposures.set_geometry_points() ent.exposures.to_crs(epsg=3857, inplace=True) fig_point = plot_exposure_ss(ent.exposures, 1064) fig_houses = plot_exposure_ss(ent.exposures) diff --git a/script/jenkins/petals_regression_test/Jenkinsfile b/script/jenkins/petals_regression_test/Jenkinsfile index 433771e3ab..a78d32d369 100644 --- a/script/jenkins/petals_regression_test/Jenkinsfile +++ b/script/jenkins/petals_regression_test/Jenkinsfile @@ -4,7 +4,7 @@ pipeline { stages { stage('integ_test') { steps { - sh 'bash script/jenkins/petals_regression_test/run_integ_test.sh' + sh "bash script/jenkins/petals_regression_test/run_integ_test.sh ${env.GIT_BRANCH}" } } } diff --git a/script/jenkins/petals_regression_test/run_integ_test.sh b/script/jenkins/petals_regression_test/run_integ_test.sh index 1c93998794..c532a1709f 100644 --- a/script/jenkins/petals_regression_test/run_integ_test.sh +++ b/script/jenkins/petals_regression_test/run_integ_test.sh @@ -5,7 +5,8 @@ mamba env update -n climada_env -f ~/jobs/petals_install_env/workspace/requireme source activate climada_env REGTESTENV=~/jobs/petals_compatibility/petals_env -BRANCH=`git branch -r | grep PR | cut -f 2 -d /` +BRANCH=$1 +echo ::: $REGTESTENV/$BRANCH PETALS_DIR=`test -e $REGTESTENV/$BRANCH && cat $REGTESTENV/$BRANCH || echo ~/jobs/petals_branches/branches/develop/workspace` python -m venv --system-site-packages tvenv From 242f1f3874583c75f489cd0a623c764151bfcfef Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Mon, 21 Oct 2024 21:31:19 +0200 Subject: [PATCH 4/4] dpendencies: pin bayesian-optimization to 1.5: 2.0 has no bayes_opt.UtilityFunction --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 9429535c8f..94514cf74c 100644 --- a/setup.py +++ b/setup.py @@ -60,7 +60,7 @@ keywords="climate adaptation", python_requires=">=3.9,<3.12", install_requires=[ - "bayesian-optimization", + "bayesian-optimization<2.0", "bottleneck", "cartopy", "cfgrib",