diff --git a/README.md b/README.md index f1fa48fd4..b9983b39f 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ https://ufs-community.github.io/UFS_UTILS/. Utility | Programmer(s) --------|---------- chgres_cube | George Gayno, Jeff Beck, Larissa Reames -cpld_gridgen | Denise Worthen, Minsuk Ji +cpld_gridgen | Denise Worthen emcsfc_ice_blend | George Gayno emcsfc_snow2mdl | George Gayno fre-nctools | GFDL progammer @@ -58,6 +58,14 @@ It also uses the following repositories: ## Installing +On Orion, Jet, Hera and WCOSS2, invoke the build script: + +``` +./build_all.sh +``` + +Otherwise, do: + ``` mkdir build cd build diff --git a/build_all.sh b/build_all.sh index 6b5dad10d..e401e4e34 100755 --- a/build_all.sh +++ b/build_all.sh @@ -30,7 +30,7 @@ fi # access the EMC ftp site, so turn off the build (-DBUILD_TESTING=OFF) of the units tests accordingly. # Those with access to the EMC ftp site are: Orion, Hera, WCOSS-Cray, WCOSS-Dell. -if [[ "$target" == "hera" || "$target" == "orion" || "$target" == "wcoss_cray" || "$target" == "wcoss_dell_p3" ]]; then +if [[ "$target" == "hera" || "$target" == "orion" || "$target" == "wcoss_cray" || "$target" == "wcoss_dell_p3" || "$target" == "wcoss2" ]]; then CMAKE_FLAGS="-DCMAKE_INSTALL_PREFIX=../ -DEMC_EXEC_DIR=ON -DBUILD_TESTING=OFF" #CMAKE_FLAGS="-DCMAKE_INSTALL_PREFIX=../ -DEMC_EXEC_DIR=ON -DBUILD_TESTING=ON" #CMAKE_FLAGS="-DCMAKE_INSTALL_PREFIX=../ -DEMC_EXEC_DIR=ON -DENABLE_DOCS=ON -DBUILD_TESTING=ON" diff --git a/driver_scripts/driver_grid.wcoss2.sh b/driver_scripts/driver_grid.wcoss2.sh new file mode 100755 index 000000000..90eac38b3 --- /dev/null +++ b/driver_scripts/driver_grid.wcoss2.sh @@ -0,0 +1,159 @@ +#!/bin/bash + +#PBS -o log +#PBS -e log +#PBS -q debug +#PBS -A GFS-DEV +#PBS -l walltime=00:05:00 +#PBS -N make_grid +#PBS -l select=1:ncpus=24:mem=100GB + +#----------------------------------------------------------------------- +# Driver script to create a cubic-sphere based model grid on WCOSS2. 
+
# 
+# Produces the following files (netcdf, each tile in separate file):
+# 1) 'mosaic' and 'grid' files containing lat/lon and other
+# records that describe the model grid.
+# 2) 'oro' files containing land mask, terrain and gravity
+# wave drag fields.
+# 3) Surface climo fields, such as soil type, vegetation
+# greenness and albedo.
+#
+# Note: The sfc_climo_gen program only runs with an
+# mpi task count that is a multiple of six. This is
+# an ESMF library requirement. Large grids may require
+# tasks spread across multiple nodes. The orography code
+# benefits from threads.
+#
+# To run, do the following:
+#
+# 1) Set "C" resolution, "res" - Example: res=96.
+# 2) Set grid type ("gtype"). Valid choices are
+# "uniform" - global uniform grid
+# "stretch" - global stretched grid
+# "nest" - global stretched grid with nest
+# "regional_gfdl" - stand-alone gfdl regional grid
+# "regional_esg" - stand-alone extended Schmidt gnomonic
+# (esg) regional grid
+# 3) For "uniform" and "regional_gfdl" grids - to include lake
+# fraction and depth, set "add_lake" to true, and the
+# "lake_cutoff" value.
+# 4) For "stretch" and "nest" grids, set the stretching factor -
+# "stretch_fac", and center lat/lon of highest resolution
+# tile - "target_lat" and "target_lon".
+# 5) For "nest" grids, set the refinement ratio - "refine_ratio",
+# the starting/ending i/j index location within the parent
+# tile - "istart_nest", "jstart_nest", "iend_nest", "jend_nest"
+# 6) For "regional_gfdl" grids, set the "halo". Default is three
+# rows/columns.
+# 7) For "regional_esg" grids, set center lat/lon of grid,
+# - "target_lat/lon" - the i/j dimensions - "i/jdim", the
+# x/y grid spacing - "delx/y", and halo.
+# 8) Set working directory - TEMP_DIR - and path to the repository
+# clone - home_dir.
+# 9) Check settings for 'make_gsl_orog' and 'veg_type_src'
+# below.
+# 10) Submit script: "qsub $script".
+# 11) All files will be placed in "out_dir".
+# +#----------------------------------------------------------------------- + +cd $PBS_O_WORKDIR + +source ../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../modulefiles +module load build.$target.intel +module list + +#----------------------------------------------------------------------- +# Set grid specs here. +#----------------------------------------------------------------------- + +export gtype=uniform # 'uniform', 'stretch', 'nest', + # 'regional_gfdl', 'regional_esg' +export make_gsl_orog=false # 'true' if user needs 'oro' files for GSL + # orographic drag suite +export veg_type_src="modis.igbp.0.05" # veg type data. + # For viirs-based vegetation type data, set to: + # 1) "viirs.igbp.0.05" for global 5km data + # 2) "viirs.igbp.0.1" for global 10km data + # 3) "viirs.igbp.0.03" for global 3km data + # 4) "viirs.igbp.conus.0.01" for regional 1km data + # For the modis-based data, set to: + # 1) "modis.igbp.0.05" for global 5km data + # 2) "modis.igbp.0.03" for global 3km data + # 3) "modis.igbp.conus.0.01" for regional 1km data + +if [ $gtype = uniform ]; then + export res=96 + export add_lake=false # Add lake frac and depth to orography data. + export lake_cutoff=0.20 # lake frac < lake_cutoff ignored when add_lake=T +elif [ $gtype = stretch ]; then + export res=96 + export stretch_fac=1.5 # Stretching factor for the grid + export target_lon=-97.5 # Center longitude of the highest resolution tile + export target_lat=35.5 # Center latitude of the highest resolution tile +elif [ $gtype = nest ] || [ $gtype = regional_gfdl ]; then + export add_lake=false # Add lake frac and depth to orography data. 
+ export lake_cutoff=0.20 # lake frac < lake_cutoff ignored when add_lake=T + export res=768 + export stretch_fac=1.5 # Stretching factor for the grid + export target_lon=-97.5 # Center longitude of the highest resolution tile + export target_lat=38.5 # Center latitude of the highest resolution tile + export refine_ratio=3 # The refinement ratio + export istart_nest=123 # Starting i-direction index of nest grid in parent tile supergrid + export jstart_nest=331 # Starting j-direction index of nest grid in parent tile supergrid + export iend_nest=1402 # Ending i-direction index of nest grid in parent tile supergrid + export jend_nest=1194 # Ending j-direction index of nest grid in parent tile supergrid + export halo=3 # Lateral boundary halo +elif [ $gtype = regional_esg ] ; then + export res=-999 # equivalent resolution is computed + export target_lon=-97.5 # Center longitude of grid + export target_lat=35.5 # Center latitude of grid + export idim=301 # Dimension of grid in 'i' direction + export jdim=200 # Dimension of grid in 'j' direction + export delx=0.0585 # Grid spacing (in degrees) in the 'i' direction + # on the SUPERGRID (which has twice the resolution of + # the model grid). The physical grid spacing in the 'i' + # direction is related to delx as follows: + # distance = 2*delx*(circumf_Earth/360 deg) + export dely=0.0585 # Grid spacing (in degrees) in the 'j' direction. + export halo=3 # number of row/cols for halo +fi + +#----------------------------------------------------------------------- +# Check paths. +# home_dir - location of repository. +# TEMP_DIR - working directory. +# out_dir - where files will be placed upon completion. +#----------------------------------------------------------------------- + +export home_dir=$PBS_O_WORKDIR/.. 
+export TEMP_DIR=/lfs/h2/emc/stmp/$LOGNAME/fv3_grid.$gtype +export out_dir=/lfs/h2/emc/stmp/$LOGNAME/my_grids + +#----------------------------------------------------------------------- +# Should not need to change anything below here unless you want to +# to change the job card for the number of tasks to use. Then, +# you will need to check APRUN_SFC and OMP_NUM_THREADS. +#----------------------------------------------------------------------- + +set -x + +export APRUN=time +export APRUN_SFC="mpiexec -n 24 -ppn 24 -cpu-bind core" +export OMP_NUM_THREADS=24 # orog code worked best with 24 threads. +export OMP_PLACES=cores +export OMP_STACKSIZE=2048m +export machine=WCOSS2_CRAY + +ulimit -a +ulimit -s unlimited + +#----------------------------------------------------------------------- +# Start script. +#----------------------------------------------------------------------- + +$home_dir/ush/fv3gfs_driver_grid.sh + +exit diff --git a/fix/link_fixdirs.sh b/fix/link_fixdirs.sh index bc5ea6f41..28485ac4c 100755 --- a/fix/link_fixdirs.sh +++ b/fix/link_fixdirs.sh @@ -9,17 +9,17 @@ machine=${2} if [ $# -lt 2 ]; then set +x echo '***ERROR*** must specify two arguements: (1) RUN_ENVIR, (2) machine' - echo ' Syntax: link_fv3gfs.sh ( nco | emc ) ( cray | dell | hera | jet | orion | s4 )' + echo ' Syntax: link_fv3gfs.sh ( nco | emc ) ( wcoss2 | cray | dell | hera | jet | orion | s4 )' exit 1 fi if [ $RUN_ENVIR != emc -a $RUN_ENVIR != nco ]; then set +x echo '***ERROR*** unsupported run environment' - echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( cray | dell | hera | jet | orion | s4 )' + echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( wcoss2 | cray | dell | hera | jet | orion | s4 )' exit 1 fi -if [ $machine != cray -a $machine != hera -a $machine != dell -a $machine != jet -a $machine != orion -a $machine != s4 ]; then +if [ $machine != wcoss2 -a $machine != cray -a $machine != hera -a $machine != dell -a $machine != jet -a $machine != orion -a $machine != s4 ]; then set +x echo 
'***ERROR*** unsupported machine' echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( cray | dell | hera | jet | orion | s4 )' @@ -45,6 +45,8 @@ elif [ $machine = "jet" ]; then FIX_DIR="/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix" elif [ $machine = "orion" ]; then FIX_DIR="/work/noaa/global/glopara/fix" +elif [ $machine = "wcoss2" ]; then + FIX_DIR="/lfs/h2/emc/global/noscrub/kate.friedman/glopara/FIX/fix" elif [ $machine = "s4" ]; then FIX_DIR="/data/prod/glopara/fix" fi diff --git a/modulefiles/build.wcoss2.intel.lua b/modulefiles/build.wcoss2.intel.lua new file mode 100644 index 000000000..e3869cc8a --- /dev/null +++ b/modulefiles/build.wcoss2.intel.lua @@ -0,0 +1,79 @@ +help([[ +Load environment to compile UFS_UTILS on WCOSS2 +]]) + +cmake_ver=os.getenv("cmake_ver") or "3.20.2" +load(pathJoin("cmake", cmake_ver)) + +PrgEnv_intel_ver=os.getenv("PrgEnv_intel_ver") or "8.1.0" +load(pathJoin("PrgEnv-intel", PrgEnv_intel_ver)) + +craype_ver=os.getenv("craype_ver") or "2.7.13" +load(pathJoin("craype", craype_ver)) + +intel_ver=os.getenv("intel_ver") or "19.1.3.304" +load(pathJoin("intel", intel_ver)) + +cray_mpich_ver=os.getenv("cray_mpich_ver") or "8.1.7" +load(pathJoin("cray-mpich", cray_mpich_ver)) + + +libjpeg_ver=os.getenv("libjpeg_ver") or "9c" +load(pathJoin("libjpeg", libjpeg_ver)) + +zlib_ver=os.getenv("zlib_ver") or "1.2.11" +load(pathJoin("zlib", zlib_ver)) + +libpng_ver=os.getenv("libpng_ver") or "1.6.37" +load(pathJoin("libpng", libpng_ver)) + +hdf5_ver=os.getenv("hdf5_ver") or "1.10.6" +load(pathJoin("hdf5", hdf5_ver)) + +netcdf_ver=os.getenv("netcdf_ver") or "4.7.4" +load(pathJoin("netcdf", netcdf_ver)) + +bacio_ver=os.getenv("bacio_ver") or "2.4.1" +load(pathJoin("bacio", bacio_ver)) + +sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +load(pathJoin("sfcio", sfcio_ver)) + +w3nco_ver=os.getenv("w3nco_ver") or "2.4.1" +load(pathJoin("w3nco", w3nco_ver)) + +nemsio_ver=os.getenv("nemsio_ver") or "2.5.2" +load(pathJoin("nemsio", nemsio_ver)) + 
+sigio_ver=os.getenv("sigio_ver") or "2.3.2" +load(pathJoin("sigio", sigio_ver)) + +sp_ver=os.getenv("sp_ver") or "2.3.3" +load(pathJoin("sp", sp_ver)) + +ip_ver=os.getenv("ip_ver") or "3.3.3" +load(pathJoin("ip", ip_ver)) + +g2_ver=os.getenv("g2_ver") or "3.4.5" +load(pathJoin("g2", g2_ver)) + +-- for mpiexec command +cray_pals_ver=os.getenv("cray_pals_ver") or "1.0.12" +load(pathJoin("cray-pals", cray_pals_ver)) + +udunits_ver=os.getenv("udunits_ver") or "2.2.28" +load(pathJoin("udunits", udunits_ver)) + +gsl_ver=os.getenv("gsl_ver") or "2.7" +load(pathJoin("gsl", gsl_ver)) + +nco_ver=os.getenv("nco_ver") or "4.9.7" +load(pathJoin("nco", nco_ver)) + +setenv("HPC_OPT","/apps/ops/para/libs") +prepend_path("MODULEPATH", "/apps/ops/para/libs/modulefiles/compiler/intel/19.1.3.304") +prepend_path("MODULEPATH", "/apps/ops/para/libs/modulefiles/mpi/intel/19.1.3.304/cray-mpich/8.1.7") +esmf_ver=os.getenv("esmf_ver") or "8.2.1b04" +load(pathJoin("esmf", esmf_ver)) + +whatis("Description: UFS_UTILS build environment") diff --git a/reg_tests/chgres_cube/README b/reg_tests/chgres_cube/README index 424accd4b..23d03f292 100644 --- a/reg_tests/chgres_cube/README +++ b/reg_tests/chgres_cube/README @@ -7,18 +7,16 @@ To run the consistency tests: 1) Build chgres_cube program. Go to ./sorc and invoke 'build_all.sh' with no arguments. -2) Invoke driver script for your machine. See script - prolog for details. Supported machines are: - - Hera (driver.hera.sh) - - WCOSS-Cray (driver.cray.sh) - - WCOSS-Dell (driver.dell.sh) - - Jet (driver.jet.sh) - - Orion (driver.orion.sh) +2) Set the fixed directories. Go to ./fix and + invoke the script. +3) Invoke driver script for your machine. See script + prolog for details. -A series of daisy-chained tests will run. A test fails -if the output files differ from the baseline set of files -as determined by the 'nccmp' utility. +A series of tests will run. 
A test fails if the output +files differ from the baseline set of files as determined +by the 'nccmp' utility. -Log output from the tests will be in "regression.log". +Log output from each test will be placed in its own +logfile - "consistency.log??". A summary of results will be in "summary.log". diff --git a/reg_tests/chgres_cube/driver.wcoss2.sh b/reg_tests/chgres_cube/driver.wcoss2.sh new file mode 100755 index 000000000..8c733853d --- /dev/null +++ b/reg_tests/chgres_cube/driver.wcoss2.sh @@ -0,0 +1,236 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run the chgres_cube consistency tests on WCOSS2. +# +# Set WORK_DIR to a general working location outside the UFS_UTILS directory. +# The exact working directory (OUTDIR) will be WORK_DIR/reg_tests/chgres-cube. +# +# Set the PROJECT_CODE and QUEUE as appropriate. +# +# Invoke the script with no arguments. To check the queue, type: +# "qstat -u USERNAME". +# +# The run output will be stored in OUTDIR. Log output will be placed +# in LOG_FILE??. Once the suite has completed, a summary is placed +# in SUM_FILE. +# +# A test fails when its output does not match the baseline files as +# determined by the "nccmp" utility. The baseline files are stored in +# HOMEreg. +# +#----------------------------------------------------------------------------- + +set -x + +compiler=${compiler:-"intel"} + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.$compiler +module list + +export OUTDIR="${WORK_DIR:-/lfs/h2/emc/stmp/$LOGNAME}" +export OUTDIR="${OUTDIR}/reg-tests/chgres-cube" + +PROJECT_CODE="${PROJECT_CODE:-GFS-DEV}" +QUEUE="${QUEUE:-dev}" + +#----------------------------------------------------------------------------- +# Should not have to change anything below here. HOMEufs is the root +# directory of your UFS_UTILS clone. HOMEreg contains the input data +# and baseline data for each test. 
+#----------------------------------------------------------------------------- + +export UPDATE_BASELINE="FALSE" +#export UPDATE_BASELINE="TRUE" + +if [ "$UPDATE_BASELINE" = "TRUE" ]; then + source ../get_hash.sh +fi + +export HOMEufs=$PWD/../.. + +export HOMEreg=/lfs/h2/emc/global/noscrub/George.Gayno/ufs_utils.git/reg_tests/chgres_cube + +LOG_FILE=consistency.log +SUM_FILE=summary.log +rm -f $LOG_FILE* $SUM_FILE + +export OMP_STACKSIZE=1024M + +export NCCMP=/lfs/h2/emc/global/noscrub/George.Gayno/util/nccmp/nccmp-1.8.5.0/src/nccmp +#export NCCMP=${NCCMP:-nccmp} +rm -fr $OUTDIR + +this_dir=$PWD + +#----------------------------------------------------------------------------- +# Initialize C96 using FV3 warm restart files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log01 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST1=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c96.fv3.restart -l select=1:ncpus=6:ompthreads=1:mem=10GB $PWD/c96.fv3.restart.sh) + +#----------------------------------------------------------------------------- +# Initialize C192 using FV3 tiled history files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log02 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST2=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c192.fv3.history -l select=1:ncpus=6:ompthreads=1:mem=10GB $PWD/c192.fv3.history.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using FV3 gaussian nemsio files. 
+#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log03 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST3=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c96.fv3.nemsio -l select=1:ncpus=6:ompthreads=1:mem=45GB $PWD/c96.fv3.nemsio.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using spectral GFS sigio/sfcio files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log04 +export OMP_PLACES=cores +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core --depth 4" +TEST4=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:10:00 \ + -N c96.gfs.sigio -l select=1:ncpus=24:ompthreads=4:mem=45GB $PWD/c96.gfs.sigio.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using spectral GFS gaussian nemsio files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log05 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST5=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c96.gfs.nemsio -l select=1:ncpus=6:ompthreads=1:mem=35GB $PWD/c96.gfs.nemsio.sh) + +#----------------------------------------------------------------------------- +# Initialize regional C96 using FV3 gaussian nemsio files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log06 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST6=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c96.regional -l select=1:ncpus=6:ompthreads=1:mem=35GB $PWD/c96.regional.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using FV3 gaussian netcdf files. 
+#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log07 +export APRUN="mpiexec -n 12 -ppn 12 --cpu-bind core" +TEST7=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c96.fv3.netcdf -l select=1:ncpus=12:ompthreads=1:mem=80GB $PWD/c96.fv3.netcdf.sh) + +#----------------------------------------------------------------------------- +# Initialize global C192 using GFS GRIB2 files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log08 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST8=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c192.gfs.grib2 -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/c192.gfs.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 25-KM USING GFS GRIB2 files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log09 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST9=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N 25km.conus.gfs.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/25km.conus.gfs.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log10 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST10=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N 3km.conus.hrrr.gfssdf.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=75GB $PWD/3km.conus.hrrr.gfssdf.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. 
+#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log11 +export APRUN="mpiexec -n 12 -ppn 12 --cpu-bind core" +TEST11=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N 3km.conus.hrrr.newsfc.grib2.conus -l select=1:ncpus=12:ompthreads=1:mem=75GB $PWD/3km.conus.hrrr.newsfc.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log12 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST12=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N 13km.conus.nam.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/13km.conus.nam.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log13 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST13=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N 13km.conus.rap.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/13km.conus.rap.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . 
+#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log14 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST14=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N 13km.na.gfs.ncei.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=25GB $PWD/13km.na.gfs.ncei.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 WAM IC using FV3 gaussian netcdf files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log15 +export APRUN="mpiexec -n 12 -ppn 12 --cpu-bind core" +TEST15=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c96.fv3.netcdf2wam -l select=1:ncpus=12:ompthreads=1:mem=75GB $PWD/c96.fv3.netcdf2wam.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log16 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST16=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N 25km.conus.gfs.pbgrib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/25km.conus.gfs.pbgrib2.sh) + +#----------------------------------------------------------------------------- +# Initialize global C96 using GEFS GRIB2 files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log17 +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST17=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c96.gefs.grib2 -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/c96.gefs.grib2.sh) + +#----------------------------------------------------------------------------- +# Create summary log. 
+#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log +qsub -V -o ${LOG_FILE} -e ${LOG_FILE} -q $QUEUE -A $PROJECT_CODE -l walltime=00:01:00 \ + -N chgres_summary -l select=1:ncpus=1:mem=100MB \ + -W depend=afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13:$TEST14:$TEST15:$TEST16:$TEST17 << EOF +#!/bin/bash +cd ${this_dir} +grep -a '<<<' ${LOG_FILE}?? | grep -v echo > $SUM_FILE +EOF + +exit 0 diff --git a/reg_tests/cpld_gridgen/rt.sh b/reg_tests/cpld_gridgen/rt.sh index ffda43358..9c1d23366 100755 --- a/reg_tests/cpld_gridgen/rt.sh +++ b/reg_tests/cpld_gridgen/rt.sh @@ -54,7 +54,7 @@ check_results() { echo "....MISSING file" | tee -a $PATHRT/$REGRESSIONTEST_LOG test_status=FAIL else - nccmp -dmfqS $(basename ${file}) $file >>${PATHRT}/nccmp_${TEST_NAME}.log 2>&1 && d=$? || d=$? + $NCCMP -dmfqS $(basename ${file}) $file >>${PATHRT}/nccmp_${TEST_NAME}.log 2>&1 && d=$? || d=$? if [[ $d -ne 0 ]]; then echo "....NOT OK" | tee -a $PATHRT/$REGRESSIONTEST_LOG test_status=FAIL @@ -115,12 +115,22 @@ COMPILE_LOG=compile.log REGRESSIONTEST_LOG=RegressionTests_$target.$compiler.log rm -f fail_test* $COMPILE_LOG run_*.log nccmp_*.log summary.log -if [[ $target = hera ]]; then +if [[ $target = wcoss2 ]]; then + STMP=${STMP:-/lfs/h2/emc/stmp/$USER} + export MOM6_FIXDIR=/lfs/h2/emc/global/noscrub/George.Gayno/ufs_utils.git/reg_tests/cpld_gridgen/fix_mom6 + BASELINE_ROOT=/lfs/h2/emc/global/noscrub/George.Gayno/ufs_utils.git/reg_tests/cpld_gridgen/baseline_data + ACCOUNT=${ACCOUNT:-GFS-DEV} + export APRUN="mpiexec -n 1 -ppn 1 --cpu-bind core" + QUEUE=${QUEUE:-dev} + SBATCH_COMMAND="./cpld_gridgen.sh" + NCCMP=/lfs/h2/emc/global/noscrub/George.Gayno/util/nccmp/nccmp-1.8.5.0/src/nccmp +elif [[ $target = hera ]]; then STMP=${STMP:-/scratch1/NCEPDEV/stmp4/$USER} export MOM6_FIXDIR=/scratch1/NCEPDEV/nems/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/fix_mom6 
BASELINE_ROOT=/scratch1/NCEPDEV/nems/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/baseline_data ACCOUNT=${ACCOUNT:-nems} QUEUE=${QUEUE:-batch} + NCCMP=nccmp PARTITION=hera SBATCH_COMMAND="./cpld_gridgen.sh" elif [[ $target = orion ]]; then @@ -129,6 +139,7 @@ elif [[ $target = orion ]]; then BASELINE_ROOT=/work/noaa/nems/role-nems/ufs_utils/reg_tests/cpld_gridgen/baseline_data ACCOUNT=${ACCOUNT:-nems} QUEUE=${QUEUE:-batch} + NCCMP=nccmp PARTITION=orion ulimit -s unlimited SBATCH_COMMAND="./cpld_gridgen.sh" @@ -138,6 +149,7 @@ elif [[ $target = jet ]]; then BASELINE_ROOT=/lfs4/HFIP/hfv3gfs/emc.nemspara/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/baseline_data ACCOUNT=${ACCOUNT:-h-nems} QUEUE=${QUEUE:-batch} + NCCMP=nccmp PARTITION=xjet ulimit -s unlimited SBATCH_COMMAND="./cpld_gridgen.sh" @@ -232,12 +244,31 @@ while read -r line || [ "$line" ]; do cp $PATHRT/parm/grid.nml.IN $RUNDIR cd $RUNDIR - sbatch --wait --ntasks-per-node=1 --nodes=1 --mem=4G -t 0:05:00 -A $ACCOUNT -q $QUEUE -J $TEST_NAME \ - --partition=$PARTITION -o $PATHRT/run_${TEST_NAME}.log -e $PATHRT/run_${TEST_NAME}.log \ - --wrap "$SBATCH_COMMAND $TEST_NAME" && d=$? || d=$? + if [[ $target = wcoss2 ]]; then + +# rm -f $RUNDIR/bad.${TEST_NAME} + + TEST=$(qsub -V -o $PATHRT/run_${TEST_NAME}.log -e $PATHRT/run_${TEST_NAME}.log -q $QUEUE -A $ACCOUNT \ + -Wblock=true -l walltime=00:05:00 -N $TEST_NAME -l select=1:ncpus=1:mem=8GB -v RESNAME=$TEST_NAME $SBATCH_COMMAND) + +# qsub -o $PATHRT/run_${TEST_NAME}.log -e $PATHRT/run_${TEST_NAME}.log -q $QUEUE -A $ACCOUNT \ +# -Wblock=true -l walltime=00:01:00 -N chgres_summary -l select=1:ncpus=1:mem=100MB -W depend=afternotok:$TEST << EOF +#!/bin/bash +# touch $RUNDIR/bad.${TEST_NAME} +#EOF +# if [[ -f $RUNDIR/bad.${TEST_NAME} ]]; then +# error "Batch job for test $TEST_NAME did not finish successfully. 
Refer to run_${TEST_NAME}.log" +# fi + + else + sbatch --wait --ntasks-per-node=1 --nodes=1 --mem=4G -t 0:05:00 -A $ACCOUNT -q $QUEUE -J $TEST_NAME \ + --partition=$PARTITION -o $PATHRT/run_${TEST_NAME}.log -e $PATHRT/run_${TEST_NAME}.log \ + --wrap "$SBATCH_COMMAND $TEST_NAME" && d=$? || d=$? + + if [[ d -ne 0 ]]; then + error "Batch job for test $TEST_NAME did not finish successfully. Refer to run_${TEST_NAME}.log" + fi - if [[ d -ne 0 ]]; then - error "Batch job for test $TEST_NAME did not finish successfully. Refer to run_${TEST_NAME}.log" fi check_results diff --git a/reg_tests/global_cycle/driver.wcoss2.sh b/reg_tests/global_cycle/driver.wcoss2.sh new file mode 100755 index 000000000..970a3cab5 --- /dev/null +++ b/reg_tests/global_cycle/driver.wcoss2.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run global_cycle consistency test on WCOSS2. +# +# Set $WORK_DIR to your working directory. Set the project code +# and queue as appropriate. +# +# Invoke the script from the command line as follows: ./$script +# +# Log output is placed in consistency.log??. A summary is +# placed in summary.log +# +# A test fails when its output does not match the baseline files +# as determined by the 'nccmp' utility. This baseline files are +# stored in HOMEreg. +# +#----------------------------------------------------------------------------- + +set -x + +compiler=${compiler:-"intel"} + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.$compiler +module list + +WORK_DIR="${WORK_DIR:-/lfs/h2/emc/stmp/$LOGNAME}" + +PROJECT_CODE="${PROJECT_CODE:-GFS-DEV}" +QUEUE="${QUEUE:-dev}" + +#----------------------------------------------------------------------------- +# Should not have to change anything below. 
+#----------------------------------------------------------------------------- + +export UPDATE_BASELINE="FALSE" +#export UPDATE_BASELINE="TRUE" + +if [ "$UPDATE_BASELINE" = "TRUE" ]; then + source ../get_hash.sh +fi + +DATA_DIR="${WORK_DIR}/reg-tests/global-cycle" + +export HOMEreg=/lfs/h2/emc/global/noscrub/George.Gayno/ufs_utils.git/reg_tests/global_cycle + +export OMP_NUM_THREADS_CY=2 +export OMP_PLACES=cores + +export APRUNCY="mpiexec -n 6 -ppn 6 --cpu-bind core --depth ${OMP_NUM_THREADS_CY}" + +export NWPROD=$PWD/../.. + +reg_dir=$PWD + +export NCCMP=/lfs/h2/emc/global/noscrub/George.Gayno/util/nccmp/nccmp-1.8.5.0/src/nccmp + +LOG_FILE=consistency.log +rm -f ${LOG_FILE}* + +export DATA="${DATA_DIR}/test1" +export COMOUT=$DATA +TEST1=$(qsub -V -o ${LOG_FILE}01 -e ${LOG_FILE}01 -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c768.fv3gfs -l select=1:ncpus=12:mem=12GB $PWD/C768.fv3gfs.sh) + +export DATA="${DATA_DIR}/test2" +export COMOUT=$DATA +TEST2=$(qsub -V -o ${LOG_FILE}02 -e ${LOG_FILE}02 -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c768.lndincsoil -l select=1:ncpus=12:mem=8GB $PWD/C768.lndincsoil.sh) + +export DATA="${DATA_DIR}/test3" +export COMOUT=$DATA +TEST3=$(qsub -V -o ${LOG_FILE}03 -e ${LOG_FILE}03 -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c768.lndincsnow -l select=1:ncpus=12:mem=8GB $PWD/C768.lndincsnow.sh) + +qsub -V -o ${LOG_FILE} -e ${LOG_FILE} -q $QUEUE -A $PROJECT_CODE -l walltime=00:01:00 \ + -N cycle_summary -l select=1:ncpus=1:mem=100MB -W depend=afterok:$TEST1:$TEST2:$TEST3 << EOF +#!/bin/bash +cd $reg_dir +grep -a '<<<' ${LOG_FILE}?? 
| grep -v echo > summary.log
+EOF
+
+exit diff --git a/reg_tests/grid_gen/driver.wcoss2.sh b/reg_tests/grid_gen/driver.wcoss2.sh new file mode 100755 index 000000000..abf239c87 --- /dev/null +++ b/reg_tests/grid_gen/driver.wcoss2.sh @@ -0,0 +1,111 @@
+#!/bin/bash
+
+#-----------------------------------------------------------------------------
+#
+# Run grid generation consistency tests on WCOSS2.
+#
+# Set WORK_DIR to your working directory. Set the PROJECT_CODE and QUEUE
+# as appropriate.
+#
+# Invoke the script with no arguments. A series of daisy-
+# chained jobs will be submitted. To check the queue, type:
+# "qstat -u USERNAME".
+#
+# Log output from the suite will be in LOG_FILE. Once the suite
+# has completed, a summary is placed in SUM_FILE.
+#
+# A test fails when its output does not match the baseline files as
+# determined by the "nccmp" utility. The baseline files are stored in
+# $HOMEreg.
+#
+#-----------------------------------------------------------------------------
+
+compiler=${compiler:-"intel"}
+
+source ../../sorc/machine-setup.sh > /dev/null 2>&1
+module use ../../modulefiles
+module load build.$target.$compiler
+module list
+
+set -x
+
+export WORK_DIR="${WORK_DIR:-/lfs/h2/emc/stmp/$LOGNAME}"
+export WORK_DIR="${WORK_DIR}/reg-tests/grid-gen"
+QUEUE="${QUEUE:-dev}"
+PROJECT_CODE="${PROJECT_CODE:-GFS-DEV}"
+
+#-----------------------------------------------------------------------------
+# Should not have to change anything below here.
+#-----------------------------------------------------------------------------
+
+export UPDATE_BASELINE="FALSE"
+#export UPDATE_BASELINE="TRUE"
+
+if [ "$UPDATE_BASELINE" = "TRUE" ]; then
+  source ../get_hash.sh
+fi
+
+export NCCMP=/lfs/h2/emc/global/noscrub/George.Gayno/util/nccmp/nccmp-1.8.5.0/src/nccmp
+
+LOG_FILE=consistency.log
+rm -f ${LOG_FILE}
+SUM_FILE=summary.log
+export home_dir=$PWD/../..
+export APRUN=time +export APRUN_SFC="mpiexec -n 30 -ppn 30 -cpu-bind core" +export OMP_STACKSIZE=2048m +export OMP_NUM_THREADS=30 # orog code uses threads +export OMP_PLACES=cores +export machine=WCOSS2 +export HOMEreg=/lfs/h2/emc/global/noscrub/George.Gayno/ufs_utils.git/reg_tests/grid_gen/baseline_data +this_dir=$PWD + +ulimit -a + +rm -fr $WORK_DIR + +#----------------------------------------------------------------------------- +# C96 uniform grid +#----------------------------------------------------------------------------- + +TEST1=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:10:00 \ + -N c96.uniform -l select=1:ncpus=30:mem=40GB $PWD/c96.uniform.sh) + +#----------------------------------------------------------------------------- +# C96 uniform grid using viirs vegetation data. +#----------------------------------------------------------------------------- + +TEST2=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:10:00 \ + -N c96.viirs.vegt -l select=1:ncpus=30:mem=40GB -W depend=afterok:$TEST1 $PWD/c96.viirs.vegt.sh) + +#----------------------------------------------------------------------------- +# gfdl regional grid +#----------------------------------------------------------------------------- + +TEST3=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:10:00 \ + -N gfdl.regional -l select=1:ncpus=30:mem=40GB -W depend=afterok:$TEST2 $PWD/gfdl.regional.sh) + +#----------------------------------------------------------------------------- +# esg regional grid +#----------------------------------------------------------------------------- + +TEST4=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:10:00 \ + -N esg.regional -l select=1:ncpus=30:mem=40GB -W depend=afterok:$TEST3 $PWD/esg.regional.sh) + +#----------------------------------------------------------------------------- +# Regional GSL gravity wave drag test. 
+#----------------------------------------------------------------------------- + +TEST5=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:10:00 \ + -N rsg.gsl.gwd -l select=1:ncpus=30:mem=40GB -W depend=afterok:$TEST4 $PWD/regional.gsl.gwd.sh) + +#----------------------------------------------------------------------------- +# Create summary log. +#----------------------------------------------------------------------------- + +qsub -V -o ${LOG_FILE} -e ${LOG_FILE} -q $QUEUE -A $PROJECT_CODE -l walltime=00:02:00 \ + -N grid_summary -l select=1:ncpus=1:mem=100MB -W depend=afterok:$TEST5 << EOF +#!/bin/bash +cd ${this_dir} +grep -a '<<<' $LOG_FILE | grep -v echo > $SUM_FILE +EOF diff --git a/reg_tests/ice_blend/driver.wcoss2.sh b/reg_tests/ice_blend/driver.wcoss2.sh new file mode 100755 index 000000000..cf85af081 --- /dev/null +++ b/reg_tests/ice_blend/driver.wcoss2.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run ice_blend consistency test on WCOSS2. +# +# Set $DATA to your working directory. Set the project code (PBS -A) +# and queue (PBS -q) as appropriate. +# +# Invoke the script as follows: qsub $script +# +# Log output is placed in consistency.log. A summary is +# placed in summary.log +# +# The test fails when its output does not match the baseline file +# as determined by the 'cmp' command. The baseline file is +# stored in HOMEreg. 
+# +#----------------------------------------------------------------------------- + +#PBS -l walltime=00:02:00 +#PBS -o consistency.log +#PBS -e consistency.log +#PBS -N iceb_regt +#PBS -q debug +#PBS -A GFS-DEV +#PBS -l select=1:ncpus=1:mem=2500MB + +cd $PBS_O_WORKDIR + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.intel +module load grib_util/1.2.3 +module load wgrib2/2.0.8 +module list + +set -x + +export DATA="${WORK_DIR:-/lfs/h2/emc/stmp/$LOGNAME}" +export DATA="${DATA}/reg-tests/ice-blend" + +#----------------------------------------------------------------------------- +# Should not have to change anything below. +#----------------------------------------------------------------------------- + +export UPDATE_BASELINE="FALSE" +#export UPDATE_BASELINE="TRUE" + +if [ "$UPDATE_BASELINE" = "TRUE" ]; then + source ../get_hash.sh +fi + +export HOMEreg=/lfs/h2/emc/global/noscrub/George.Gayno/ufs_utils.git/reg_tests/ice_blend +export HOMEgfs=$PBS_O_WORKDIR/../.. 
+ +rm -fr $DATA + +./ice_blend.sh + +exit 0 diff --git a/reg_tests/rt.sh b/reg_tests/rt.sh index e533191b2..992e250d1 100755 --- a/reg_tests/rt.sh +++ b/reg_tests/rt.sh @@ -49,6 +49,13 @@ if [[ $target == "wcoss_dell_p3" ]] || [[ $target == "wcoss_cray" ]]; then if [[ "${this_letter}" == "${prod_letter}" ]]; then exit 0 fi +elif [[ $target == "wcoss2" ]]; then + this_machine=`cat /etc/cluster_name` + prod_machine=`grep primary /lfs/h1/ops/prod/config/prodmachinefile` + prod_machine=`echo ${prod_machine/primary:}` + if [[ "${this_machine}" == "${prod_machine}" ]]; then + exit 0 + fi fi # Set machine_id variable for running link_fixdirs @@ -67,6 +74,7 @@ cd fix cd ../reg_tests +#if [[ $target == "orion" ]] || [[ $target == "jet" ]] || [[ $target == "hera" ]] || [[ $target == "wcoss2" ]] ; then if [[ $target == "orion" ]] || [[ $target == "jet" ]] || [[ $target == "hera" ]] ; then cd cpld_gridgen @@ -119,6 +127,8 @@ for dir in ice_blend; do sbatch -A ${PROJECT_CODE} ./driver.$target.sh elif [[ $target == "wcoss_dell_p3" ]] || [[ $target == "wcoss_cray" ]]; then cat ./driver.$target.sh | bsub -P ${PROJECT_CODE} + elif [[ $target == "wcoss2" ]] ; then + qsub -v WORK_DIR ./driver.$target.sh fi # Wait for job to complete diff --git a/reg_tests/snow2mdl/driver.wcoss2.sh b/reg_tests/snow2mdl/driver.wcoss2.sh new file mode 100755 index 000000000..833ae5e3d --- /dev/null +++ b/reg_tests/snow2mdl/driver.wcoss2.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run snow2mdl consistency tests on WCOSS2. +# +# Set $DATA_ROOT to your working directory. Set the project code +# and queue as appropriate. +# +# Invoke the script as follows: ./$script +# +# Log output is placed in consistency.log. A summary is +# placed in summary.log +# +# The test fails when its output does not match the baseline file +# as determined by the 'cmp' command. The baseline files are +# stored in HOMEreg. 
+# +#----------------------------------------------------------------------------- + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.intel +module load grib_util/1.2.2 +module load wgrib2/2.0.8 +module list + +set -x + +export DATA_ROOT="${WORK_DIR:-/lfs/h2/emc/stmp/$LOGNAME}" +export DATA_ROOT="${DATA_ROOT}/reg-tests/snow2mdl" + +PROJECT_CODE=${PROJECT_CODE:-"GFS-DEV"} +QUEUE=${QUEUE:-"dev"} + +#----------------------------------------------------------------------------- +# Should not have to change anything below. +#----------------------------------------------------------------------------- + +export UPDATE_BASELINE="FALSE" +#export UPDATE_BASELINE="TRUE" + +if [ "$UPDATE_BASELINE" = "TRUE" ]; then + source ../get_hash.sh +fi + +export HOMEreg=/lfs/h2/emc/global/noscrub/George.Gayno/ufs_utils.git/reg_tests/snow2mdl +export HOMEgfs=$PWD/../.. + +LOG_FILE=consistency.log +SUM_FILE=summary.log + +rm -fr $DATA_ROOT + +#----------------------------------------------------------------------------- +# Test GFS ops snow. +#----------------------------------------------------------------------------- + +export DATA=$DATA_ROOT/test.ops +TEST1=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l select=1:ncpus=1:mem=2500MB \ + -N snow.ops -l walltime=00:03:00 $PWD/snow2mdl.ops.sh) + +#----------------------------------------------------------------------------- +# Test afwa global snow. +#----------------------------------------------------------------------------- + +export DATA=$DATA_ROOT/test.global +TEST2=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l select=1:ncpus=1:mem=2500MB \ + -N snow.global -l walltime=00:03:00 -W depend=afterok:$TEST1 $PWD/snow2mdl.global.sh) + +#----------------------------------------------------------------------------- +# Create summary log. 
+#----------------------------------------------------------------------------- + +this_dir=$PWD +qsub -V -o ${LOG_FILE} -e ${LOG_FILE} -q $QUEUE -A $PROJECT_CODE -l walltime=00:01:00 \ + -N snow_summary -l select=1:ncpus=1:mem=100MB -W depend=afterok:$TEST2 << EOF +#!/bin/bash +cd ${this_dir} +grep -a '<<<' $LOG_FILE | grep -v echo > $SUM_FILE +EOF +exit 0 diff --git a/scripts/exemcsfc_global_sfc_prep.sh b/scripts/exemcsfc_global_sfc_prep.sh index 7662402a9..9587f2a07 100755 --- a/scripts/exemcsfc_global_sfc_prep.sh +++ b/scripts/exemcsfc_global_sfc_prep.sh @@ -90,7 +90,7 @@ cd $DATA export SENDCOM=${SENDCOM:-"NO"} #----------------------------------------------------------------------- -# the "postmsg" and "err_exit" utilities are only used in ncep ops +# The "err_exit" utility is only used in ncep ops # when the "prod_util" module is loaded. #----------------------------------------------------------------------- @@ -134,10 +134,7 @@ export pgmout=${pgmout:-OUTPUT} # call utility script to create global ice blend data. #----------------------------------------------------------------------- -if test "$use_prod_util" = "true" ; then - msg="create blended ice data." - postmsg "$jlogfile" "$msg" -fi +echo "Create blended ice data." ${USHgfs}/emcsfc_ice_blend.sh rc=$? 
@@ -157,16 +154,10 @@ if ((rc != 0));then if test "$SENDCOM" = "YES" then if [ -s $BLENDED_ICE_FILE_m6hrs ]; then - msg="copy old ice blend file to current directory" - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "Copy old ice blend file to current directory" cp $BLENDED_ICE_FILE_m6hrs $COMOUT/$BLENDED_ICE_FILE else - msg="FATAL ERROR: CURRENT AND 6-HR OLD ICE FILE MISSING" - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "FATAL ERROR: CURRENT AND 6-HR OLD ICE FILE MISSING" if test "$use_prod_util" = "true" ; then err_exit else @@ -198,10 +189,7 @@ export GFS_LONSPERLAT_FILE=${LONSPERLAT:-$FIXgfs_am/global_lonsperlat.t${resolut export MODEL_SNOW_FILE=${FNSNOAJCAP:-${RUN}.${cycle}.snogrb_t${resolution}} export MODEL_SNOW_FILE_m6hrs=${FNSNOGJCAP:-${COMINgfs_m6hrs}/${RUN}.${cycle_m6hrs}.snogrb_t${resolution}} -if test "$use_prod_util" = "true" ; then - msg="create ${JCAP} snow data." - postmsg "$jlogfile" "$msg" -fi +echo "Create ${JCAP} snow data." ${USHgfs}/emcsfc_snow.sh rc=$? 
@@ -221,16 +209,10 @@ if ((rc != 0)); then if test "$SENDCOM" = "YES" then if [ -s $MODEL_SNOW_FILE_m6hrs ]; then - msg="COPY OLD ${JCAP} SNOW FILE TO CURRENT DIRECTORY" - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "COPY OLD ${JCAP} SNOW FILE TO CURRENT DIRECTORY" cp $MODEL_SNOW_FILE_m6hrs $COMOUT/$MODEL_SNOW_FILE else - msg="FATAL ERROR: CURRENT AND 6-HR OLD ${JCAP} SNOW MISSING" - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "FATAL ERROR: CURRENT AND 6-HR OLD ${JCAP} SNOW MISSING" if test "$use_prod_util" = "true" ; then err_exit else @@ -265,10 +247,7 @@ export GFS_LONSPERLAT_FILE=${LONSPERLAT_ENKF:-$FIXgfs_am/global_lonsperlat.t${re export MODEL_SNOW_FILE=${FNSNOAJCAP_ENKF:-${RUN}.${cycle}.snogrb_t${resolution}} export MODEL_SNOW_FILE_m6hrs=${FNSNOGJCAP_ENKF:-${COMINgfs_m6hrs}/${RUN}.${cycle_m6hrs}.snogrb_t${resolution}} -if test "$use_prod_util" = "true" ; then - msg="create enkf snow data." - postmsg "$jlogfile" "$msg" -fi +echo "Create enkf snow data." ${USHgfs}/emcsfc_snow.sh rc=$? 
@@ -282,16 +261,10 @@ if ((rc != 0)); then if test "$SENDCOM" = "YES" then if [ -s $MODEL_SNOW_FILE_m6hrs ]; then - msg="COPY OLD ENKF SNOW FILE TO CURRENT DIRECTORY" - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "COPY OLD ENKF SNOW FILE TO CURRENT DIRECTORY" cp $MODEL_SNOW_FILE_m6hrs $COMOUT/$MODEL_SNOW_FILE else - msg="FATAL ERROR: CURRENT AND 6-HR OLD ENKF SNOW MISSING" - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "FATAL ERROR: CURRENT AND 6-HR OLD ENKF SNOW MISSING" if test "$use_prod_util" = "true" ; then err_exit else diff --git a/sorc/machine-setup.sh b/sorc/machine-setup.sh index ad82266e3..26d40982f 100644 --- a/sorc/machine-setup.sh +++ b/sorc/machine-setup.sh @@ -27,6 +27,9 @@ if [[ -d /lfs3 ]] ; then fi target=jet module purge +elif [[ -d /lfs/h1 ]] ; then + target=wcoss2 + module reset elif [[ -d /scratch1 ]] ; then # We are on NOAA Hera if ( ! eval module help > /dev/null 2>&1 ) ; then diff --git a/ush/cpld_gridgen.sh b/ush/cpld_gridgen.sh index e0b2af0ed..bce186a62 100755 --- a/ush/cpld_gridgen.sh +++ b/ush/cpld_gridgen.sh @@ -18,7 +18,7 @@ function edit_namelist { -e "s/DO_POSTWGTS/$DO_POSTWGTS/g" } -export RESNAME=$1 +export RESNAME=${RESNAME:-$1} export DEBUG=.false. export MASKEDIT=.false. export DO_POSTWGTS=.false. @@ -95,6 +95,8 @@ if [ ! -d ${OUTDIR_PATH} ]; then mkdir -p ${OUTDIR_PATH} fi +cd ${OUTDIR_PATH} + edit_namelist < grid.nml.IN > grid.nml $APRUN ./cpld_gridgen diff --git a/ush/emcsfc_ice_blend.sh b/ush/emcsfc_ice_blend.sh index 9bceee83e..20ddbb373 100755 --- a/ush/emcsfc_ice_blend.sh +++ b/ush/emcsfc_ice_blend.sh @@ -45,8 +45,8 @@ if [[ "$VERBOSE" == YES ]]; then fi #----------------------------------------------------------------------- -# the "postmsg", "startmsg" and "prep_step" utilities -# are only used in ncep ops when the "prod_util" module is loaded. 
+# The "startmsg" and "prep_step" utilities are only +# used in ncep ops when the "prod_util" module is loaded. #----------------------------------------------------------------------- use_prod_util=`echo $UTILROOT` @@ -121,10 +121,7 @@ then grid173="0 0 0 0 0 0 0 0 4320 2160 0 0 89958000 42000 48 -89958000 359958000 83000 83000 0" $COPYGB2 -x -i3 -g "$grid173" ims.icec.grib2 ims.icec.5min.grib2 else - msg="WARNING in ${pgm}: IMS ice data missing. Can not run program." - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "WARNING in ${pgm}: IMS ice data missing. Can not run program." exit 3 fi @@ -135,10 +132,7 @@ fi if [ ! -f ${FIVE_MIN_ICE_FILE} ] then - msg="WARNING in ${pgm}: MMAB ice data missing. Can not run program." - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "WARNING in ${pgm}: MMAB ice data missing. Can not run program." exit 5 fi @@ -177,20 +171,14 @@ rc=$? if (( rc != 0 )) then - msg="WARNING: ${pgm} completed abnormally." - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "WARNING: ${pgm} completed abnormally." exit $rc else $WGRIB2 -set_int 3 51 42000 ${BLENDED_ICE_FILE} -grib ${BLENDED_ICE_FILE}.corner $CNVGRIB -g21 ${BLENDED_ICE_FILE}.corner ${BLENDED_ICE_FILE}.bitmap rm $BLENDED_ICE_FILE $COPYGB -M "#1.57" -x ${BLENDED_ICE_FILE}.bitmap $BLENDED_ICE_FILE - msg="${pgm} completed normally." - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "${pgm} completed normally." if [ "$SENDCOM" = "YES" ] ; then cp $BLENDED_ICE_FILE $COMOUT fi diff --git a/ush/emcsfc_snow.sh b/ush/emcsfc_snow.sh index a844cc317..8071a18b5 100755 --- a/ush/emcsfc_snow.sh +++ b/ush/emcsfc_snow.sh @@ -53,12 +53,10 @@ if [[ "$VERBOSE" == YES ]]; then fi #----------------------------------------------------------------------- -# the "postmsg", "startmsg" and "prep_step" utilities -# are only used in ncep ops when the "prod_util" module is loaded. 
+# The "startmsg" and "prep_step" utilities are only +# used in ncep ops when the "prod_util" module is loaded. #----------------------------------------------------------------------- -jlogfile=${jlogfile:-"jlogfile"} - use_prod_util=`echo $UTILROOT` if ((${#use_prod_util} != 0)); then use_prod_util="true" @@ -148,10 +146,7 @@ $WGRIB2 ${IMS_FILE} rc1=$? if ((rc1 != 0));then - msg="WARNING: ${pgm} detects corrupt IMS data. Can not run." - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "WARNING: ${pgm} detects corrupt IMS data. Can not run." exit $rc1 fi @@ -223,16 +218,10 @@ eval $SNOW2MDLEXEC >> $pgmout 2> errfile rc2=$? if ((rc2!= 0));then - msg="WARNING: ${pgm} completed abnormally." - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "WARNING: ${pgm} completed abnormally." exit $rc2 else - msg="${pgm} completed normally." - if test "$use_prod_util" = "true" ; then - postmsg "$jlogfile" "$msg" - fi + echo "${pgm} completed normally." if test "$SENDCOM" = "YES" then cp $MODEL_SNOW_FILE $COMOUT diff --git a/util/gdas_init/config b/util/gdas_init/config index ddfc7c025..5ca148541 100644 --- a/util/gdas_init/config +++ b/util/gdas_init/config @@ -47,17 +47,17 @@ # #----------------------------------------------------------- -EXTRACT_DIR=/lfs4/HFIP/emcda/$USER/stmp/gdas.init/input -EXTRACT_DATA=yes +EXTRACT_DIR=/lfs/h2/emc/stmp/$USER/gdas.init/input +EXTRACT_DATA=no -RUN_CHGRES=no +RUN_CHGRES=yes -yy=2021 -mm=03 -dd=21 +yy=2022 +mm=05 +dd=06 hh=06 -use_v16retro=yes +use_v16retro=no LEVS=65 @@ -68,7 +68,7 @@ CRES_ENKF=C96 UFS_DIR=$PWD/../.. -OUTDIR=/lfs4/HFIP/emcda/$USER/stmp/gdas.init/output +OUTDIR=/lfs/h2/emc/stmp/$USER/gdas.init/output #--------------------------------------------------------- # Dont touch anything below here. 
diff --git a/util/gdas_init/driver.wcoss2.sh b/util/gdas_init/driver.wcoss2.sh new file mode 100755 index 000000000..76a3bb329 --- /dev/null +++ b/util/gdas_init/driver.wcoss2.sh @@ -0,0 +1,235 @@ +#!/bin/bash + +#--------------------------------------------------------------------- +# Driver script for running on WCOSS2. +# +# Edit the 'config' file before running. +#--------------------------------------------------------------------- + +set -x + +compiler=${compiler:-"intel"} +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.$compiler +module list + +# Needed for NDATE utility +module load prod_util/2.0.8 + +PROJECT_CODE=GFS-DEV + +source config + +this_dir=$PWD + +if [ $EXTRACT_DATA == yes ]; then + + rm -fr $EXTRACT_DIR + mkdir -p $EXTRACT_DIR + + QUEUE=dev_transfer + MEM=2GB + WALLT="02:00:00" + + case $gfs_ver in + v12 | v13) + DATAH=$(qsub -V -o log.data.${CDUMP} -e log.data.${CDUMP} -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_${CDUMP} -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_pre-v14.data.sh ${CDUMP}) + DEPEND="-W depend=afterok:$DATAH" + if [ "$CDUMP" = "gdas" ] ; then + DATA1=$(qsub -V -o log.data.enkf -e log.data.enkf -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_enkf -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_pre-v14.data.sh enkf) + DEPEND="-W depend=afterok:$DATAH:$DATA1" + fi + ;; + v14) + DATAH=$(qsub -V -o log.data.${CDUMP} -e log.data.${CDUMP} -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_${CDUMP} -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v14.data.sh ${CDUMP}) + DEPEND="-W depend=afterok:$DATAH" + if [ "$CDUMP" = "gdas" ] ; then + DATA1=$(qsub -V -o log.data.enkf -e log.data.enkf -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_enkf -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v14.data.sh enkf) + DEPEND="-W depend=afterok:$DATAH:$DATA1" + fi + ;; + v15) + if [ "$CDUMP" = "gfs" ] ; then + DATAH=$(qsub -V -o log.data.${CDUMP} -e 
log.data.${CDUMP} -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_${CDUMP} -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh ${CDUMP}) + DEPEND="-W depend=afterok:$DATAH" + else + DATAH=$(qsub -V -o log.data.${CDUMP} -e log.data.${CDUMP} -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_${CDUMP} -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh ${CDUMP}) + DATA1=$(qsub -V -o log.data.grp1 -e log.data.grp1 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_grp1 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh grp1) + DATA2=$(qsub -V -o log.data.grp2 -e log.data.grp2 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_grp2 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh grp2) + DATA3=$(qsub -V -o log.data.grp3 -e log.data.grp3 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_grp3 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh grp3) + DATA4=$(qsub -V -o log.data.grp4 -e log.data.grp4 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_grp4 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh grp4) + DATA5=$(qsub -V -o log.data.grp5 -e log.data.grp5 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_grp5 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh grp5) + DATA6=$(qsub -V -o log.data.grp6 -e log.data.grp6 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_grp6 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh grp6) + DATA7=$(qsub -V -o log.data.grp7 -e log.data.grp7 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_grp7 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh grp7) + DATA8=$(qsub -V -o log.data.grp8 -e log.data.grp8 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_grp8 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh grp8) + DEPEND="-W depend=afterok:$DATAH:$DATA1:$DATA2:$DATA3:$DATA4:$DATA5:$DATA6:$DATA7:$DATA8" + fi + ;; + v16retro) + DATAH=$(qsub -V -o log.data.v16retro -e 
log.data.v16retro -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \
+        -N get_v16retro -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v16retro.data.sh ${CDUMP})
+      DEPEND="-W depend=afterok:$DATAH"
+      ;;
+    v16)
+      DATAH=$(qsub -V -o log.data.${CDUMP} -e log.data.${CDUMP} -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \
+        -N get_${CDUMP} -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v16.data.sh ${CDUMP})
+      DEPEND="-W depend=afterok:$DATAH"
+      if [ "$CDUMP" = "gdas" ] ; then
+        DATA1=$(qsub -V -o log.data.grp1 -e log.data.grp1 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \
+          -N get_grp1 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v16.data.sh grp1)
+        DATA2=$(qsub -V -o log.data.grp2 -e log.data.grp2 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \
+          -N get_grp2 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v16.data.sh grp2)
+        DATA3=$(qsub -V -o log.data.grp3 -e log.data.grp3 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \
+          -N get_grp3 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v16.data.sh grp3)
+        DATA4=$(qsub -V -o log.data.grp4 -e log.data.grp4 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \
+          -N get_grp4 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v16.data.sh grp4)
+        DATA5=$(qsub -V -o log.data.grp5 -e log.data.grp5 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \
+          -N get_grp5 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v16.data.sh grp5)
+        DATA6=$(qsub -V -o log.data.grp6 -e log.data.grp6 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \
+          -N get_grp6 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v16.data.sh grp6)
+        DATA7=$(qsub -V -o log.data.grp7 -e log.data.grp7 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \
+          -N get_grp7 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v16.data.sh grp7)
+        DATA8=$(qsub -V -o log.data.grp8 -e log.data.grp8 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \
+          -N get_grp8 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v16.data.sh grp8)
+        DEPEND="-W 
depend=afterok:$DATAH:$DATA1:$DATA2:$DATA3:$DATA4:$DATA5:$DATA6:$DATA7:$DATA8" + fi + ;; + esac + +else # do not extract data. + + DEPEND=' ' + +fi # extract data? + +if [ $RUN_CHGRES == yes ]; then + + QUEUE=dev + NODES=1 + TASKS_PER_NODE=24 + WALLT="0:15:00" + MEM=75GB + if [ $CRES_HIRES == 'C768' ] ; then + MEM=250GB + elif [ $CRES_HIRES == 'C1152' ] ; then + MEM=350GB + NODES=1 + TASKS_PER_NODE=48 + WALLT="0:20:00" + fi + NCPUS=${TASKS_PER_NODE} + (( TASKS = NODES * TASKS_PER_NODE )) + export APRUN="mpiexec -n $TASKS -ppn $TASKS_PER_NODE --cpu-bind core" + case $gfs_ver in + v12 | v13) + export OMP_NUM_THREADS=4 + export OMP_STACKSIZE=1024M + export OMP_PLACES=cores + export APRUN="$APRUN --depth ${OMP_NUM_THREADS}" + (( NCPUS = NCPUS * OMP_NUM_THREADS )) + qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=${OMP_NUM_THREADS}:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ + -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} -- ${this_dir}/run_pre-v14.chgres.sh ${CDUMP} + ;; + v14) + qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ + -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} -- ${this_dir}/run_v14.chgres.sh ${CDUMP} + ;; + v15) + if [ "$CDUMP" = "gdas" ]; then + qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ + -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} -- ${this_dir}/run_v15.chgres.sh ${CDUMP} + else + qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ + -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} ${this_dir}/run_v15.chgres.gfs.sh + fi + ;; + v16retro) + if [ "$CDUMP" = "gdas" ] ; then + qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ + -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} -- 
${this_dir}/run_v16retro.chgres.sh hires + else + qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ + -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} -- ${this_dir}/run_v16.chgres.sh ${CDUMP} + fi + ;; + v16) + qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ + -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} -- ${this_dir}/run_v16.chgres.sh ${CDUMP} + ;; + esac + + if [ "$CDUMP" = "gdas" ]; then + + WALLT="0:15:00" + MEM=75GB + NODES=1 + TASKS_PER_NODE=12 + NCPUS=${TASKS_PER_NODE} + (( TASKS = NODES * TASKS_PER_NODE)) + export APRUN="mpiexec -n $TASKS -ppn $TASKS_PER_NODE --cpu-bind core" + + if [ "$gfs_ver" = "v16retro" ]; then + + qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ + -N chgres_enkf -o log.enkf -e log.enkf ${DEPEND} -- ${this_dir}/run_v16retro.chgres.sh enkf + + else + + case $gfs_ver in # use threads for v12/13 data. 
+      v12 | v13)
+        export OMP_NUM_THREADS=2
+        export OMP_STACKSIZE=1024M
+        export OMP_PLACES=cores
+        export APRUN="$APRUN --depth ${OMP_NUM_THREADS}"
+        (( NCPUS = NCPUS * OMP_NUM_THREADS ))
+        ;;
+      esac
+
+      MEMBER=1
+      while [ $MEMBER -le 80 ]; do
+        if [ $MEMBER -lt 10 ]; then
+          MEMBER_CH="00${MEMBER}"
+        else
+          MEMBER_CH="0${MEMBER}"
+        fi
+        case $gfs_ver in
+        v12 | v13)
+          qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=${OMP_NUM_THREADS}:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \
+            -N chgres_${MEMBER_CH} -o log.${MEMBER_CH} -e log.${MEMBER_CH} ${DEPEND} -- ${this_dir}/run_pre-v14.chgres.sh ${MEMBER_CH}
+          ;;
+        v14)
+          qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \
+            -N chgres_${MEMBER_CH} -o log.${MEMBER_CH} -e log.${MEMBER_CH} ${DEPEND} -- ${this_dir}/run_v14.chgres.sh ${MEMBER_CH}
+          ;;
+        v15)
+          qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \
+            -N chgres_${MEMBER_CH} -o log.${MEMBER_CH} -e log.${MEMBER_CH} ${DEPEND} -- ${this_dir}/run_v15.chgres.sh ${MEMBER_CH}
+          ;;
+        v16)
+          qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \
+            -N chgres_${MEMBER_CH} -o log.${MEMBER_CH} -e log.${MEMBER_CH} ${DEPEND} -- ${this_dir}/run_v16.chgres.sh ${MEMBER_CH}
+          ;;
+        esac
+        MEMBER=$(( $MEMBER + 1 ))
+      done
+
+    fi # v16 retro?
+
+  fi # which CDUMP?
+
+fi # run chgres?
diff --git a/util/sfc_climo_gen/run.wcoss2.sh b/util/sfc_climo_gen/run.wcoss2.sh
new file mode 100755
index 000000000..63e9c692f
--- /dev/null
+++ b/util/sfc_climo_gen/run.wcoss2.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+#------------------------------------------------------------
+# Run the sfc_climo_gen program stand-alone on WCOSS2 using
+# pre-existing 'grid' and 'orography' files.
+#
+# To run, type: 'qsub $script'
+#------------------------------------------------------------
+
+#PBS -o log
+#PBS -e log
+#PBS -q debug
+#PBS -A GFS-DEV
+#PBS -N grid_fv3
+#PBS -l walltime=00:10:00
+#PBS -l select=1:ncpus=24:mem=75GB
+
+set -x
+
+# Adjust according to the PBS -l statement.
+export APRUN_SFC="mpiexec -n 24 -ppn 24 -cpu-bind core"
+
+export BASE_DIR=$PBS_O_WORKDIR/../..
+
+source ${BASE_DIR}/sorc/machine-setup.sh > /dev/null 2>&1
+module use ${BASE_DIR}/modulefiles
+module load build.$target.intel
+module list
+
+#-------------------------------------
+# Set model resolution.
+#-------------------------------------
+
+export res=384
+
+#-------------------------------------
+# Where the model "grid", "mosaic" and "oro" files reside.
+#-------------------------------------
+
+export FIX_FV3=${BASE_DIR}/fix/fix_fv3_gmted2010/C${res}
+
+#-------------------------------------
+# Uncomment for regional grids.
+#-------------------------------------
+
+##HALO=3
+##export GRIDTYPE=regional
+
+#-------------------------------------
+# Choose which viirs data to use.
+#-------------------------------------
+
+export veg_type_src="viirs.igbp.0.05" # Use global 0.05-degree viirs data
+#export veg_type_src="viirs.igbp.0.1" # Use global 0.1-degree viirs data
+#export veg_type_src="viirs.igbp.0.03" # Use global 0.03-degree viirs data
+#export veg_type_src="viirs.igbp.conus.0.01" # Use CONUS 0.01-degree viirs data. Do not
+                                             # use for global grids.
+
+#-------------------------------------
+# Set working directory and directory where output files will be saved.
+#-------------------------------------
+
+export WORK_DIR=/lfs/h2/emc/stmp/$LOGNAME/work.sfc
+export SAVE_DIR=/lfs/h2/emc/stmp/$LOGNAME/sfc.C${res}
+
+#-------------------------------------
+# Should not have to touch anything below here.
+#------------------------------------- + +if [[ $GRIDTYPE = "regional" ]]; then + HALO=$(( $HALO + 1 )) + export HALO + ln -fs $FIX_FV3/C${res}_grid.tile7.halo${HALO}.nc $FIX_FV3/C${res}_grid.tile7.nc + ln -fs $FIX_FV3/C${res}_oro_data.tile7.halo${HALO}.nc $FIX_FV3/C${res}_oro_data.tile7.nc +fi + +export input_sfc_climo_dir=${BASE_DIR}/fix/fix_sfc_climo + +ulimit -a +ulimit -s unlimited + +rm -fr $WORK_DIR $SAVE_DIR + +${BASE_DIR}/ush/sfc_climo_gen.sh + +exit diff --git a/util/vcoord_gen/run.sh b/util/vcoord_gen/run.sh index b0d5c3aff..bf8ed0852 100755 --- a/util/vcoord_gen/run.sh +++ b/util/vcoord_gen/run.sh @@ -3,7 +3,7 @@ #------------------------------------------------------------------------------- # # Generate a hybrid coordinate interface profile. On WCOSS-Cray, use -# 'run.cray.sh'. +# 'run.cray.sh'. On WCOSS2, use 'run.wcoss2.sh'. # # Build the repository using the ./build_all.sh script before running. # diff --git a/util/vcoord_gen/run.wcoss2.sh b/util/vcoord_gen/run.wcoss2.sh new file mode 100755 index 000000000..64160f69c --- /dev/null +++ b/util/vcoord_gen/run.wcoss2.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +#PBS -l walltime=00:02:00 +#PBS -o log +#PBS -e log +#PBS -N vcoord +#PBS -q debug +#PBS -A GFS-DEV +#PBS -l select=1:ncpus=1:mem=100MB + +#------------------------------------------------------------------------------- +# +# Generate a hybrid coordinate interface profile on WCOSS2. +# +# Build the repository using the ./build_all.sh script before running. +# +# Output 'ak' and 'bk' values are placed in $outfile. 
+# +# To run this script, do: 'qsub $script' +# +#------------------------------------------------------------------------------- + +cd $PBS_O_WORKDIR + +set -x + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.intel +module list + +outfile="./global_hyblev.txt" + +levs=128 # integer number of levels +lupp=88 # integer number of levels below pupp +pbot=100000.0 # real nominal surface pressure (Pa) +psig=99500.0 # real nominal pressure where coordinate changes + # from pure sigma (Pa) +ppre=7000.0 # real nominal pressure where coordinate changes + # to pure pressure (Pa) +pupp=7000.0 # real nominal pressure where coordinate changes + # to upper atmospheric profile (Pa) +ptop=0.0 # real pressure at top (Pa) +dpbot=240.0 # real coordinate thickness at bottom (Pa) +dpsig=1200.0 # real thickness of zone within which coordinate changes + # to pure sigma (Pa) +dppre=18000.0 # real thickness of zone within which coordinate changes + # to pure pressure (Pa) +dpupp=550.0 # real coordinate thickness at pupp (Pa) +dptop=1.0 # real coordinate thickness at top (Pa) + +rm -f $outfile + +echo $levs $lupp $pbot $psig $ppre $pupp $ptop $dpbot $dpsig $dppre $dpupp $dptop | ../../exec/vcoord_gen > $outfile + +exit