diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 9ebd17f4d..8cf4c6ce4 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -7,9 +7,11 @@ jobs:
contrib-readme-job:
runs-on: ubuntu-latest
name: A job to automate contrib in readme
- if: ${{ github.repository_owner == 'pypsa-meets-earth' && github.ref == 'refs/heads/main'}}
+ if: ${{ github.event_name == 'workflow_dispatch' || (github.repository_owner == 'pypsa-meets-earth' && github.ref == 'refs/heads/main')}}
steps:
- name: Contribute List
uses: akhilmhdh/contributors-readme-action@v2.3.10
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ use_username: true
diff --git a/README.md b/README.md
index 34b998185..6de82672e 100644
--- a/README.md
+++ b/README.md
@@ -187,322 +187,396 @@ The documentation is available here: [documentation](https://pypsa-earth.readthe
diff --git a/Snakefile b/Snakefile
index ddf8c3f15..2cc10e4f6 100644
--- a/Snakefile
+++ b/Snakefile
@@ -53,12 +53,11 @@ CDIR = RDIR if not run.get("shared_cutouts") else ""
SECDIR = run["sector_name"] + "/" if run.get("sector_name") else ""
SDIR = config["summary_dir"].strip("/") + f"/{SECDIR}"
RESDIR = config["results_dir"].strip("/") + f"/{SECDIR}"
-COSTDIR = config["costs_dir"]
load_data_paths = get_load_paths_gegis("data", config)
if config["enable"].get("retrieve_cost_data", True):
- COSTS = "resources/" + RDIR + "costs.csv"
+ COSTS = "resources/" + RDIR + f"costs_{config['costs']['year']}.csv"
else:
COSTS = "data/costs.csv"
ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4)
@@ -392,29 +391,18 @@ if not config["enable"].get("build_natura_raster", False):
if config["enable"].get("retrieve_cost_data", True):
rule retrieve_cost_data:
+ params:
+ version=config["costs"]["version"],
input:
HTTP.remote(
- f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/costs_{config['costs']['year']}.csv",
+ f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/"
+ + "costs_{year}.csv",
keep_local=True,
),
output:
- COSTS,
+ "resources/" + RDIR + "costs_{year}.csv",
log:
- "logs/" + RDIR + "retrieve_cost_data.log",
- resources:
- mem_mb=5000,
- run:
- move(input[0], output[0])
-
- rule retrieve_cost_data_flexible:
- input:
- HTTP.remote(
- f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/costs"
- + "_{planning_horizons}.csv",
- keep_local=True,
- ),
- output:
- costs=COSTDIR + "costs_{planning_horizons}.csv",
+ "logs/" + RDIR + "retrieve_cost_data_{year}.log",
resources:
mem_mb=5000,
run:
@@ -1079,7 +1067,7 @@ rule prepare_sector_network:
input:
network=RESDIR
+ "prenetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_presec.nc",
- costs=COSTDIR + "costs_{planning_horizons}.csv",
+ costs="resources/" + RDIR + "costs_{planning_horizons}.csv",
h2_cavern="data/hydrogen_salt_cavern_potentials.csv",
nodal_energy_totals="resources/"
+ SECDIR
@@ -1173,7 +1161,7 @@ rule add_export:
input:
overrides="data/override_component_attrs",
export_ports="resources/" + SECDIR + "export_ports.csv",
- costs=COSTDIR + "costs_{planning_horizons}.csv",
+ costs="resources/" + RDIR + "costs_{planning_horizons}.csv",
ship_profile="resources/" + SECDIR + "ship_profile_{h2export}TWh.csv",
network=RESDIR
+ "prenetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}.nc",
@@ -1366,6 +1354,7 @@ rule build_base_energy_totals:
unsd_paths="data/demand/unsd/paths/Energy_Statistics_Database.xlsx",
output:
energy_totals_base="resources/" + SECDIR + "energy_totals_base.csv",
+ unsd_export_path=directory("data/demand/unsd/data/"),
script:
"scripts/build_base_energy_totals.py"
@@ -1639,7 +1628,7 @@ if config["foresight"] == "overnight":
# + "prenetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}.nc",
network=RESDIR
+ "prenetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export.nc",
- costs=COSTDIR + "costs_{planning_horizons}.csv",
+ costs="resources/" + RDIR + "costs_{planning_horizons}.csv",
configs=SDIR + "configs/config.yaml", # included to trigger copy_config rule
output:
RESDIR
@@ -1684,7 +1673,7 @@ rule make_sector_summary:
**config["costs"],
**config["export"],
),
- costs=COSTDIR + "costs_{planning_horizons}.csv",
+ costs="resources/" + RDIR + "costs_{planning_horizons}.csv",
plots=expand(
RESDIR
+ "maps/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}-costs-all_{planning_horizons}_{discountrate}_{demand}_{h2export}export.pdf",
@@ -1885,7 +1874,7 @@ rule build_base_industry_totals: #default data
input:
#os.path.dirname(snakemake.input["transactions_path"]) + "/demand/unsd/data/"
#industrial_production_per_country="data/industrial_production_per_country.csv",
- unsd_path="data/demand/unsd/data/",
+ unsd_export_path="data/demand/unsd/data/",
energy_totals_base="resources/" + SECDIR + "energy_totals_base.csv",
transactions_path="data/unsd_transactions.csv",
output:
@@ -1922,7 +1911,7 @@ rule build_industry_demand: #default data
+ SECDIR
+ "demand/base_industry_totals_{planning_horizons}_{demand}.csv",
industrial_database="data/industrial_database.csv",
- costs=COSTDIR + "costs_{planning_horizons}.csv",
+ costs="resources/" + RDIR + "costs_{planning_horizons}.csv",
industry_growth_cagr="data/demand/industry_growth_cagr.csv",
output:
industrial_energy_demand_per_node="resources/"
diff --git a/config.default.yaml b/config.default.yaml
index c88107537..a85ec5127 100644
--- a/config.default.yaml
+++ b/config.default.yaml
@@ -11,7 +11,6 @@ logging:
results_dir: results/
summary_dir: results/
-costs_dir: data/ # TODO change to the equivalent of technology data
foresight: overnight
@@ -358,7 +357,7 @@ renewable:
# Costs Configuration
costs:
year: 2030
- version: v0.6.2
+ version: v0.10.0
discountrate: [0.071] #, 0.086, 0.111]
# [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html # noqa: E501
USD2013_to_EUR2013: 0.7532 # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html
diff --git a/data/custom_powerplants.csv b/data/custom_powerplants.csv
index fb83a5ff4..d81c32bca 100644
--- a/data/custom_powerplants.csv
+++ b/data/custom_powerplants.csv
@@ -1 +1 @@
-Name,Fueltype,Technology,Set,Country,Capacity,Efficiency,Duration,Volume_Mm3,DamHeight_m,StorageCapacity_MWh,DateIn,DateRetrofit,DateMothball,DateOut,lat,lon,EIC,projectID,bus
+Name,Fueltype,Technology,Set,Country,Capacity,Efficiency,Duration,Volume_Mm3,DamHeight_m,StorageCapacity_MWh,DateIn,DateRetrofit,DateOut,lat,lon,EIC,projectID,bus
diff --git a/doc/release_notes.rst b/doc/release_notes.rst
index 45acacdca..1c3c38b71 100644
--- a/doc/release_notes.rst
+++ b/doc/release_notes.rst
@@ -13,10 +13,18 @@ This part of documentation collects descriptive release notes to capture the mai
**New Features and Major Changes**
+* Drop duplication of retrieve_data and COST_DIR, add params and update technology-data version `PR #1249 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1249>`__
+
+* In alternative clustering, generate hydro inflows by shape and avoid hydro inflows duplication for plants installed in the same node `PR #1120 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1120>`__
+
* Add a function to calculate length-based efficiencies and apply it to the H2 pipelines. `PR #1192 `__
**Minor Changes and bug-fixing**
+* Prevent computation of powerplantmatching if replace option is selected for custom_powerplants `PR #1281 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1281>`__
+
+* Fix overlapping bus regions when alternative clustering is selected `PR #1287 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1287>`__
+
* Fix lossy bidirectional links, especially H2 pipelines, which would sometimes gain H2 instead of losing it. `PR #1192 `__
PyPSA-Earth 0.6.0
diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py
index 75ae9ce42..84232f4d8 100755
--- a/scripts/add_electricity.py
+++ b/scripts/add_electricity.py
@@ -488,7 +488,10 @@ def attach_hydro(n, costs, ppl):
ror = ppl.query('technology == "Run-Of-River"')
phs = ppl.query('technology == "Pumped Storage"')
hydro = ppl.query('technology == "Reservoir"')
- bus_id = ppl["bus"]
+ if snakemake.params.alternative_clustering:
+ bus_id = ppl["region_id"]
+ else:
+ bus_id = ppl["bus"]
inflow_idx = ror.index.union(hydro.index)
if not inflow_idx.empty:
diff --git a/scripts/build_base_industry_totals.py b/scripts/build_base_industry_totals.py
index 1e5eda9a8..977f95ea8 100644
--- a/scripts/build_base_industry_totals.py
+++ b/scripts/build_base_industry_totals.py
@@ -117,7 +117,7 @@ def create_industry_base_totals(df):
renaming_dit = transaction.set_index("Transaction")["clean_name"].to_dict()
clean_industry_list = list(transaction.clean_name.unique())
- unsd_path = snakemake.input.unsd_path
+ unsd_path = snakemake.input.unsd_export_path
# Get the files from the path provided in the OP
all_files = list(Path(unsd_path).glob("*.txt"))
diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py
index 1a0dc2338..e5b3cf3ca 100644
--- a/scripts/build_bus_regions.py
+++ b/scripts/build_bus_regions.py
@@ -256,6 +256,26 @@ def get_gadm_shape(
crs=country_shapes.crs,
).dropna(axis="index", subset=["geometry"])
+ if snakemake.params.alternative_clustering:
+ # determine isolated buses
+ n.determine_network_topology()
+ non_isolated_buses = n.buses.duplicated(subset=["sub_network"], keep=False)
+ isolated_buses = n.buses[~non_isolated_buses].index
+ non_isolated_regions = onshore_regions[
+ ~onshore_regions.name.isin(isolated_buses)
+ ]
+ isolated_regions = onshore_regions[onshore_regions.name.isin(isolated_buses)]
+
+ # Combine regions while prioritizing non-isolated ones
+ onshore_regions = pd.concat(
+ [non_isolated_regions, isolated_regions]
+ ).drop_duplicates("shape_id", keep="first")
+
+ if len(onshore_regions) < len(gadm_country):
+ logger.warning(
+                "The number of remaining buses is less than the number of administrative clusters suggested!"
+ )
+
onshore_regions = pd.concat([onshore_regions], ignore_index=True).to_file(
snakemake.output.regions_onshore
)
diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py
index 4bf22e524..b1719108d 100644
--- a/scripts/build_powerplants.py
+++ b/scripts/build_powerplants.py
@@ -337,13 +337,16 @@ def replace_natural_gas_technology(df: pd.DataFrame):
else:
config["main_query"] = ""
- ppl = (
- pm.powerplants(from_url=False, update=True, config_update=config)
- .powerplant.fill_missing_decommissioning_years()
- .query('Fueltype not in ["Solar", "Wind"] and Country in @countries_names')
- .powerplant.convert_country_to_alpha2()
- .pipe(replace_natural_gas_technology)
- )
+ if snakemake.config["electricity"]["custom_powerplants"] != "replace":
+ ppl = (
+ pm.powerplants(from_url=False, update=True, config_update=config)
+ .powerplant.fill_missing_decommissioning_years()
+ .query('Fueltype not in ["Solar", "Wind"] and Country in @countries_names')
+ .powerplant.convert_country_to_alpha2()
+ .pipe(replace_natural_gas_technology)
+ )
+ else:
+ ppl = pd.DataFrame()
ppl = add_custom_powerplants(
ppl, snakemake.input, snakemake.config
diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py
index 8c16bce09..77427534d 100644
--- a/scripts/build_renewable_profiles.py
+++ b/scripts/build_renewable_profiles.py
@@ -356,6 +356,9 @@ def rescale_hydro(plants, runoff, normalize_using_yearly, normalization_year):
logger.info("No bus has installed hydro plants, ignoring normalization.")
return runoff
+ if snakemake.params.alternative_clustering:
+ plants = plants.set_index("shape_id")
+
years_statistics = normalize_using_yearly.index
if isinstance(years_statistics, pd.DatetimeIndex):
years_statistics = years_statistics.year
@@ -530,6 +533,24 @@ def create_scaling_factor(
# the region should be restricted for non-hydro technologies, as the hydro potential is calculated across hydrobasins which may span beyond the region of the country
cutout = filter_cutout_region(cutout, regions)
+ if snakemake.params.alternative_clustering:
+ regions = gpd.GeoDataFrame(
+ regions.reset_index()
+ .groupby("shape_id")
+ .agg(
+ {
+ "x": "mean",
+ "y": "mean",
+ "country": "first",
+ "geometry": "first",
+ "bus": "first",
+ }
+ )
+ .reset_index()
+ .set_index("bus"),
+ crs=regions.crs,
+ )
+
buses = regions.index
func = getattr(cutout, resource.pop("method"))
@@ -556,10 +577,17 @@ def create_scaling_factor(
# select busbar whose location (p) belongs to at least one hydrobasin geometry
# if extendable option is true, all buses are included
# otherwise only where hydro powerplants are available are considered
- filter_bus_to_consider = regions.index.map(
- lambda bus_id: config.get("extendable", False)
- | (bus_id in hydro_ppls.bus.values)
- )
+ if snakemake.params.alternative_clustering:
+ filter_bus_to_consider = regions.index.map(
+ lambda bus_id: config.get("extendable", False)
+ | (bus_id in hydro_ppls.region_id.values)
+ )
+            ### TODO: quickfix. The above and below cases should be unified
+ if snakemake.params.alternative_clustering == False:
+ filter_bus_to_consider = regions.index.map(
+ lambda bus_id: config.get("extendable", False)
+ | (bus_id in hydro_ppls.bus.values)
+ )
bus_to_consider = regions.index[filter_bus_to_consider]
# identify subset of buses within the hydrobasins
@@ -577,10 +605,17 @@ def create_scaling_factor(
columns={"x": "lon", "y": "lat", "country": "countries"}
).loc[bus_in_hydrobasins, ["lon", "lat", "countries", "shape_id"]]
- resource["plants"]["installed_hydro"] = [
- True if (bus_id in hydro_ppls.bus.values) else False
- for bus_id in resource["plants"].index
- ]
+ # TODO: these cases shall be fixed by restructuring the alternative clustering procedure
+ if snakemake.params.alternative_clustering == False:
+ resource["plants"]["installed_hydro"] = [
+ True if (bus_id in hydro_ppls.bus.values) else False
+ for bus_id in resource["plants"].index
+ ]
+ else:
+ resource["plants"]["installed_hydro"] = [
+ True if (bus_id in hydro_ppls.region_id.values) else False
+ for bus_id in resource["plants"].shape_id.values
+ ]
# get normalization before executing runoff
normalization = None
@@ -596,6 +631,8 @@ def create_scaling_factor(
else:
# otherwise perform the calculations
inflow = correction_factor * func(capacity_factor=True, **resource)
+ if snakemake.params.alternative_clustering:
+ inflow["plant"] = regions.shape_id.loc[inflow["plant"]].values
if "clip_min_inflow" in config:
inflow = inflow.where(inflow >= config["clip_min_inflow"], 0)
diff --git a/test/config.test_myopic.yaml b/test/config.test_myopic.yaml
index 382def55f..ab306a63a 100644
--- a/test/config.test_myopic.yaml
+++ b/test/config.test_myopic.yaml
@@ -8,7 +8,6 @@ tutorial: true
results_dir: results/
summary_dir: results/
-costs_dir: data/ #TODO change to the equivalent of technology data
run:
name: "test_myopic" # use this to keep track of runs with different settings
@@ -99,7 +98,7 @@ custom_data:
costs: # Costs used in PyPSA-Earth-Sec. Year depends on the wildcard planning_horizon in the scenario section
- version: v0.6.2
+ version: v0.10.0
lifetime: 25 #default lifetime
# From a Lion Hirth paper, also reflects average of Noothout et al 2016
discountrate: [0.071] #, 0.086, 0.111]