Update benchmark table output to better show which diagnostics have Ref = Dev #200

Merged: 8 commits merged on Feb 9, 2023
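For context, here is a minimal usage sketch of the new summary-table call, following the argument order shown in the `run_benchmark.py` diff below. The import style, directory paths, version labels, and dates are placeholders, not values from this PR.

```python
import numpy as np
from gcpy import benchmark as bmk   # assumed import; run_benchmark.py refers to the module as `bmk`

# Placeholder benchmark output locations (substitute your own)
refdir = "/path/to/GCC_ref/OutputDir"
devdir = "/path/to/GCC_dev/OutputDir"

bmk.create_benchmark_summary_table(
    refdir,
    "GCC_ref",                       # Ref version label (placeholder)
    np.datetime64("2019-07-01"),     # Ref date (placeholder)
    devdir,
    "GCC_dev",                       # Dev version label (placeholder)
    np.datetime64("2019-07-01"),     # Dev date (placeholder)
    collections=["AerosolMass", "Emissions", "JValues", "SpeciesConc", "StateMet"],
    dst="./BenchmarkResults/Tables",
    outfilename="Summary.txt",
    overwrite=True,
    verbose=False,
)
```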
5 changes: 5 additions & 0 deletions CHANGELOG.md
@@ -6,6 +6,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),

## Unreleased

### Added
- Benchmark summary table output (intended for 1hr & 1mo benchmarks)
- Species/emissions/inventories that differ between Dev & Ref versions are now printed at the top of the benchmark emissions, inventory, and global mass tables. If there are too many species with differences, an alternate message is printed instead.
- New functions in `benchmark.py` and `util.py` to facilitate printing of the species/emissions/inventories that differ between Dev & Ref versions (see the sketch below).

## [1.3.2] -- 2022-10-25

### Fixes
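The new helper functions in `benchmark.py` and `util.py` are not shown in this diff. Roughly, the idea is a variable-by-variable comparison of the Ref and Dev diagnostic files; the sketch below only illustrates that idea. The function name, file names, and threshold here are hypothetical, not GCPy's actual API.

```python
import numpy as np
import xarray as xr

def find_differing_variables(refpath, devpath):
    """Return names of data variables whose values differ between a Ref
    and a Dev diagnostic file (illustrative only, not GCPy's helper)."""
    with xr.open_dataset(refpath) as ref, xr.open_dataset(devpath) as dev:
        common = sorted(set(ref.data_vars) & set(dev.data_vars))
        # NOTE: NaNs compare as unequal here; real code would handle them
        return [
            var for var in common
            if not np.array_equal(ref[var].values, dev[var].values)
        ]

# Hypothetical file names; the threshold of 10 is arbitrary
diffs = find_differing_variables("Ref.SpeciesConc.nc4", "Dev.SpeciesConc.nc4")
if len(diffs) > 10:
    print(f"{len(diffs)} variables differ between Ref and Dev")
else:
    print("Variables with differences: " + ", ".join(diffs))
```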
1 change: 1 addition & 0 deletions benchmark/1mo_benchmark.yml
@@ -108,6 +108,7 @@ options:
ops_budget_table: False
OH_metrics: True
ste_table: True # GCC only
summary_table: True
plot_options: # Plot concentrations and emissions by category?
by_spc_cat: True
by_hco_cat: True
131 changes: 107 additions & 24 deletions benchmark/run_benchmark.py
@@ -642,29 +642,6 @@ def run_benchmark_default(config):
if config["options"]["outputs"]["OH_metrics"]:
print("\n%%% Creating GCC vs. GCC OH metrics table %%%")

# Use this for benchmarks prior to GEOS-Chem 13.0.0
# # Diagnostic collection files to read
# col = "ConcAfterChem"
# ref = get_filepath(gcc_vs_gcc_refdir, col, gcc_ref_date)
# dev = get_filepath(gcc_vs_gcc_devdir, col, gcc_dev_date)
#
# # Meteorology data needed for calculations
# col = "StateMet"
# refmet = get_filepath(gcc_vs_gcc_refdir, col, gcc_ref_date)
# devmet = get_filepath(gcc_vs_gcc_devdir, col, gcc_dev_date)
#
# # Print OH metrics
# bmk.make_benchmark_oh_metrics(
# ref,
# refmet,
# config["data"]["ref"]["gcc"]["version"],
# dev,
# devmet,
# config["data"]["dev"]["gcc"]["version"],
# dst=gcc_vs_gcc_tablesdir,
# overwrite=True
# )

# Filepaths
ref = get_filepath(gcc_vs_gcc_refdir, "Metrics", gcc_ref_date)
dev = get_filepath(gcc_vs_gcc_devdir, "Metrics", gcc_dev_date)
@@ -703,6 +680,48 @@ def run_benchmark_default(config):
month=gcc_dev_date.astype(datetime).month,
)

# ==================================================================
# GCC vs. GCC summary table
# ==================================================================
if config["options"]["outputs"]["summary_table"]:
print("\n%%% Creating GCC vs. GCC summary table %%%")

# Diagnostic collections to check
collections = [
'AerosolMass',
'Aerosols',
'Emissions',
'JValues',
'Metrics',
'SpeciesConc',
'StateMet',
]

# Print summary of which collections are identical
# between Ref & Dev, and which are not identical.
bmk.create_benchmark_summary_table(
gcc_vs_gcc_refdir,
config["data"]["ref"]["gcc"]["version"],
gcc_ref_date,
gcc_vs_gcc_devdir,
config["data"]["dev"]["gcc"]["version"],
gcc_dev_date,
collections=collections,
dst=gcc_vs_gcc_tablesdir,
outfilename="Summary.txt",
overwrite=True,
verbose=False,
)


# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Create GCHP vs GCC benchmark plots and tables
#
@@ -1014,6 +1033,38 @@ def run_benchmark_default(config):
title = "\n%%% Skipping GCHP vs. GCC Strat-Trop Exchange table %%%"
print(title)


# ==================================================================
# GCHP vs. GCC summary table
# ==================================================================
if config["options"]["outputs"]["summary_table"]:
print("\n%%% Creating GCHP vs. GCC summary table %%%")

# Print summary of which collections are identical
# between Ref & Dev, and which are not identical.
bmk.create_benchmark_summary_table(
gchp_vs_gcc_refdir,
config["data"]["dev"]["gcc"]["version"],
gcc_dev_date,
gchp_vs_gcc_devdir,
config["data"]["dev"]["gchp"]["version"],
gchp_dev_date,
collections=[
'AerosolMass',
'Aerosols',
'Emissions',
'JValues',
'Metrics',
'SpeciesConc',
'StateMet',
],
dst=gchp_vs_gcc_tablesdir,
outfilename="Summary.txt",
overwrite=True,
verbose=False,
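# Flag the Dev data as coming from a GCHP simulation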
dev_gchp=True
)

# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Create GCHP vs GCHP benchmark plots and tables
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1376,6 +1427,38 @@ def run_benchmark_default(config):
if config["options"]["outputs"]["ste_table"]:
print("\n%%% Skipping GCHP vs. GCHP Strat-Trop Exchange table %%%")

# ==================================================================
# GCHP vs. GCHP summary table
# ==================================================================
if config["options"]["outputs"]["summary_table"]:
print("\n%%% Creating GCHP vs. GCHP summary table %%%")

# Print summary of which collections are identical
# between Ref & Dev, and which are not identical.
bmk.create_benchmark_summary_table(
gchp_vs_gchp_refdir,
config["data"]["ref"]["gchp"]["version"],
gchp_ref_date,
gchp_vs_gchp_devdir,
config["data"]["dev"]["gchp"]["version"],
gchp_dev_date,
collections=[
'AerosolMass',
'Aerosols',
'Emissions',
'JValues',
'Metrics',
'SpeciesConc',
'StateMet',
],
dst=gchp_vs_gchp_tablesdir,
outfilename="Summary.txt",
overwrite=True,
verbose=False,
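# Both Ref and Dev data come from GCHP simulations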
ref_gchp=True,
dev_gchp=True,
)

# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Create GCHP vs GCC difference of differences benchmark plots
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1423,7 +1506,7 @@ def run_benchmark_default(config):
# ==================================================================
# Print a message indicating that the benchmarks finished
# ==================================================================
print("\n %%%% All requested benchmark plots/tables created! %%%%")
print("\n%%%% All requested benchmark plots/tables created! %%%%")


def main():