Merge pull request ESCOMP#2505 from TeaganKing/move_plumber_scripts
Move plumber2 scripts to python directory (on b4b branch)
Showing 7 changed files with 480 additions and 292 deletions.
@@ -0,0 +1,183 @@
#! /usr/bin/env python3
"""
|------------------------------------------------------------------|
|---------------------  Instructions  -----------------------------|
|------------------------------------------------------------------|
This script is a simple wrapper for PLUMBER2 sites that performs the
following:
    1) Reads the site list and dominant-PFT information from
       PLUMBER2_sites.csv
    2) For each site, subsets the surface dataset from the global
       dataset, overwriting it with the site's dominant PFT(s)
       (i.e. ./subset_data.py)
Instructions for running using conda python environments:
    ../../py_env_create
    conda activate ctsm_py
"""
# Import libraries

import argparse
import logging
import subprocess

import pandas as pd
import tqdm

def get_parser():
    """
    Get parser object for this script.
    """
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )

    parser.print_usage = parser.print_help

    parser.add_argument(
        "-v",
        "--verbose",
        help="Verbose mode will print more information. ",
        action="store_true",
        dest="verbose",
        default=False,
    )

    parser.add_argument(
        "--16pft",
        help="Create and/or modify 16-PFT surface datasets (e.g. for a FATES run) ",
        action="store_true",
        dest="pft_16",
        # NOTE: because the default is True, this flag is effectively always set,
        # so the 78-PFT branch in main() is only reached if this default changes.
        default=True,
    )

    return parser

def execute(command):
    """
    Run a command in the shell.
    Args:
        command (list of str):
            the command that we want to run, as an argument list.
    Prints the error instead of raising if the command fails.
    """
    print("\n", " >> ", *command, "\n")

    try:
        # Discard the command's output; subprocess.DEVNULL avoids leaking
        # the file handle that open(os.devnull, "w") would have left open.
        subprocess.check_call(command, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)

    except subprocess.CalledProcessError as err:
        print(err)
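

# Example of how execute() is used (hypothetical command, for illustration only):
#     execute(["./subset_data", "--help"])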


def main():
    """
    Read plumber2_sites from csv, iterate through sites, and add dominant PFT
    """

    args = get_parser().parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    plumber2_sites = pd.read_csv("PLUMBER2_sites.csv", skiprows=4)

    for _, row in tqdm.tqdm(plumber2_sites.iterrows()):
        lat = row["Lat"]
        lon = row["Lon"]
        site = row["Site"]
        pft1 = row["pft1"]
        pctpft1 = row["pft1-%"]
        cth1 = row["pft1-cth"]
        cbh1 = row["pft1-cbh"]
        pft2 = row["pft2"]
        pctpft2 = row["pft2-%"]
        cth2 = row["pft2-cth"]
        cbh2 = row["pft2-cbh"]
        # overwrite missing values from .csv file
        if pft1 == -999:
            pft1 = 0
            pctpft1 = 0
            cth1 = 0
            cbh1 = 0
        if pft2 == -999:
            pft2 = 0
            pctpft2 = 0
            cth2 = 0
            cbh2 = 0
        clmsite = "1x1_PLUMBER2_" + site
        print("Now processing site:", site)

        if args.pft_16:
            # use the surface dataset with 16 PFTs, but overwrite to one dominant PFT at 100%
            # don't set crop flag
            # set dominant pft
            subset_command = [
                "./subset_data",
                "point",
                "--lat",
                str(lat),
                "--lon",
                str(lon),
                "--site",
                clmsite,
                "--dompft",
                str(pft1),
                str(pft2),
                "--pctpft",
                str(pctpft1),
                str(pctpft2),
                "--cth",
                str(cth1),
                str(cth2),
                "--cbh",
                str(cbh1),
                str(cbh2),
                "--create-surface",
                "--uniform-snowpack",
                "--cap-saturation",
                "--verbose",
                "--overwrite",
            ]
        else:
            # use the surface dataset with 78 PFTs, and overwrite to one dominant PFT at 100%
            # NOTE: FATES will currently not run with a 78-PFT surface dataset
            # set crop flag
            # set dominant pft
            subset_command = [
                "./subset_data",
                "point",
                "--lat",
                str(lat),
                "--lon",
                str(lon),
                "--site",
                clmsite,
                "--crop",
                "--dompft",
                str(pft1),
                str(pft2),
                "--pctpft",
                str(pctpft1),
                str(pctpft2),
                "--create-surface",
                "--uniform-snowpack",
                "--cap-saturation",
                "--verbose",
                "--overwrite",
            ]
        execute(subset_command)


if __name__ == "__main__":
    main()
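
For reference, a minimal sketch of the per-site call this wrapper assembles (16-PFT path). The site name, coordinates, and PFT values below are hypothetical stand-ins, not rows from PLUMBER2_sites.csv:

import shlex

# Hypothetical example row; real values come from PLUMBER2_sites.csv
site, lat, lon = "XX-abc", 45.0, -90.0
pft1, pft2 = 1, 0
pctpft1, pctpft2 = 100.0, 0.0
cth1, cth2 = 17.0, 0.0
cbh1, cbh2 = 8.5, 0.0

subset_command = [
    "./subset_data", "point",
    "--lat", str(lat), "--lon", str(lon),
    "--site", "1x1_PLUMBER2_" + site,
    "--dompft", str(pft1), str(pft2),
    "--pctpft", str(pctpft1), str(pctpft2),
    "--cth", str(cth1), str(cth2),
    "--cbh", str(cbh1), str(cbh2),
    "--create-surface", "--uniform-snowpack", "--cap-saturation",
    "--verbose", "--overwrite",
]
# A shell-readable rendering of the argv list that execute() receives
print(shlex.join(subset_command))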
@@ -0,0 +1,191 @@
#! /usr/bin/env python3

"""
Reads in .csv files with PLUMBER2 site information
Creates individual usermod_dirs for each PLUMBER2 site with shell_commands
"""

# Import libraries

import os

import pandas as pd
import tqdm


# Big ugly function to create usermod_dirs for each site
def write_usermods(
    lat, lon, site, start_year, end_year, start_date, start_year_actual, start_tod, atm_ncpl, stop_n
):
    """
    Write information to be added to user mods
    """

    site_dir = os.path.join("../../cime_config/usermods_dirs/PLUMBER2/", site)

    if not os.path.isdir(site_dir):
        os.makedirs(site_dir, exist_ok=True)

    # create files in each directory
    include = os.path.join(site_dir, "include_user_mods")
    with open(include, "w") as i_file:  # use mode 'a' instead to append rather than truncate
        i_file.write("../defaults")

    # pylint: disable=anomalous-backslash-in-string
    lai_stream = (
        "\$DIN_LOC_ROOT/lnd/clm2/lai_streams/PLUMBER2/"
        + site
        + "/LAI_stream_"
        + site
        + "_"
        + str(start_year)
        + "-"
        + str(end_year)
        + ".nc"
    )
    shell = os.path.join(site_dir, "shell_commands")
    # pylint: disable=line-too-long
    with open(shell, "w") as s_file:  # use mode 'a' instead to append rather than truncate
        s_file.write(
            # TODO turn on following line after cdeps changes are added
            #'./xmlchange PLUMBER2SITE='+site + '\n' \
            "./xmlchange PTS_LON=" + str(lon) + "\n"
            "./xmlchange PTS_LAT=" + str(lat) + "\n"
            "./xmlchange DATM_YR_END=" + str(end_year) + "\n"
            "./xmlchange START_TOD=" + str(start_tod) + "\n"
            "./xmlchange ATM_NCPL=" + str(atm_ncpl) + "\n"
            "\n"  # TODO, get working for CTSM5.1:
            # remove the above line as it's redundant after PLUMBER2SITE is added
            # Alternatively, we can take this out of default/user_nl_clm
            # since doing it this way works fine (TODO for 5.2)
            "echo \"fsurdat='/glade/u/home/wwieder/CTSM/tools/site_and_regional/subset_data_single_point/surfdata_1x1_PLUMBER2_"
            + site
            + "_hist_16pfts_Irrig_CMIP6_simyr2000_c231005.nc ' \" >> user_nl_clm \n"
            'echo "CLM_USRDAT.PLUMBER2:datafiles= \$DIN_LOC_ROOT/atm/datm7/CLM1PT_data/PLUMBER2/'
            + site
            + "/CLM1PT_data/CTSM_DATM_"
            + site
            + "_"
            + str(start_year)
            + "-"
            + str(end_year)
            + '.nc " >> user_nl_datm_streams \n'
            'echo "presaero.SSP3-7.0:year_first=' + str(start_year) + '" >> user_nl_datm_streams \n'
            'echo "presaero.SSP3-7.0:year_last=' + str(end_year) + '" >> user_nl_datm_streams \n'
            'echo "presaero.SSP3-7.0:year_align=' + str(start_year) + '" >> user_nl_datm_streams \n'
            "\n"
            'echo "presndep.SSP3-7.0:year_first=' + str(start_year) + '" >> user_nl_datm_streams \n'
            'echo "presndep.SSP3-7.0:year_last=' + str(end_year) + '" >> user_nl_datm_streams \n'
            'echo "presndep.SSP3-7.0:year_align=' + str(start_year) + '" >> user_nl_datm_streams \n'
            "\n"
            'echo "co2tseries.SSP3-7.0:year_first=' + str(start_year) + '" >> user_nl_datm_streams \n'
            'echo "co2tseries.SSP3-7.0:year_last=' + str(end_year) + '" >> user_nl_datm_streams \n'
            'echo "co2tseries.SSP3-7.0:year_align=' + str(start_year) + '" >> user_nl_datm_streams \n'
            "\n"
            "compset=`./xmlquery COMPSET --value` \n"
            "CLM_USRDAT_NAME=`./xmlquery CLM_USRDAT_NAME --value` \n"
            "TEST=`./xmlquery TEST --value` \n"
            "\n"
            "# For a transient case run the whole length and do not cycle \n"
            "if [[ $compset =~ ^HIST ]]; then \n"
            "  # Number of years that can be run for the full transient case \n"
            '  if [[ $TEST != "TRUE" ]]; then \n'
            "    ./xmlchange STOP_N=" + str(stop_n) + "\n"
            "  fi \n"
            "  # set start date for transient case with historical compset \n"
            "  ./xmlchange RUN_STARTDATE=" + str(start_date) + "\n"
            "  ./xmlchange DATM_YR_ALIGN=" + str(start_year_actual) + "\n"
            "  ./xmlchange DATM_YR_START=" + str(start_year_actual) + "\n"
            "else \n"
            "  # for spinup case with I2000 compset \n"
            "  ./xmlchange RUN_STARTDATE=0001-01-01" + "\n"
            "  ./xmlchange DATM_YR_ALIGN=" + str(1) + "\n"
            "  ./xmlchange DATM_YR_START=" + str(start_year) + "\n"
            "fi \n"
            "\n"
            "# Turn on LAI streams for a SP case \n"
            "if [[ $compset =~ .*CLM[0-9]+%[^_]*SP.* ]]; then \n"
            "  echo \"stream_fldfilename_lai='" + lai_stream + "'\" >> user_nl_clm \n"
            '  echo "stream_year_last_lai=' + str(end_year) + '" >> user_nl_clm \n'
            "  if [[ $compset =~ ^HIST ]]; then \n"
            "    # for transient case with a historical compset \n"
            '    echo "model_year_align_lai=' + str(start_year_actual) + '" >> user_nl_clm \n'
            '    echo "stream_year_first_lai=' + str(start_year_actual) + '" >> user_nl_clm \n'
            "  else \n"
            "    # for a spinup case with an I2000 compset \n"
            '    echo "model_year_align_lai=1" >> user_nl_clm \n'
            '    echo "stream_year_first_lai=' + str(start_year) + '" >> user_nl_clm \n'
            "  fi \n"
            "fi \n"
            "\n"
        )
    # pylint: enable=line-too-long, anomalous-backslash-in-string

    # add baseflow_scalar = 0 to user_nl_clm for wetland sites
    wetland = [
        "CZ-wet",
        "DE-SfN",
        "FI-Kaa",
        "FI-Lom",
        "RU-Che",
        "SE-Deg",
        "US-Los",
        "US-Myb",
        "US-Tw4",
        "PL-wet",
    ]
    if site in wetland:
        with open(shell, "a") as s_file:  # append to the existing shell_commands
            s_file.write(
                "\n"
                "# set baseflow scalar to zero for wetland site \n"
                'echo "baseflow_scalar = 0" >> user_nl_clm'
            )


# End write_usermods function


def main():
    """
    Iterate through plumber2 sites and create usermod_dirs
    """

    # For now we can just run the 'main' program as a loop
    plumber2_sites = pd.read_csv("PLUMBER2_sites.csv", skiprows=4)

    for _, row in tqdm.tqdm(plumber2_sites.iterrows()):
        lat = row["Lat"]
        lon = row["Lon"]
        site = row["Site"]
        start_year = row["start_year"]
        end_year = row["end_year"]
        start_date = row["RUN_STARTDATE"]
        start_year_actual = start_date[:4]
        start_tod = row["START_TOD"]
        atm_ncpl = row["ATM_NCPL"]
        stop_n = 1 + end_year - start_year

        write_usermods(
            lat,
            lon,
            site,
            start_year,
            end_year,
            start_date,
            start_year_actual,
            start_tod,
            atm_ncpl,
            stop_n,
        )


if __name__ == "__main__":
    main()
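
As a worked example of the bookkeeping above, this minimal sketch shows what main() derives from one CSV row before calling write_usermods(); the years and date below are invented for illustration:

# Hypothetical CSV values; real ones come from PLUMBER2_sites.csv
start_year, end_year = 2003, 2006
start_date = "2003-01-01"            # hypothetical RUN_STARTDATE entry

start_year_actual = start_date[:4]   # "2003": first year of the site's forcing data
stop_n = 1 + end_year - start_year   # 4: passed to ./xmlchange STOP_N for HIST cases

print(start_year_actual, stop_n)     # 2003 4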