land surface upgrades for p8c #499

Merged: 8 commits, Mar 29, 2022

Changes from 1 commit
Merge remote-tracking branch 'upstream/develop' into lsm_upgrades_for_p8c
HelinWei-NOAA committed Mar 20, 2022

Verified: this commit was created on GitHub.com and signed with GitHub's verified signature. The key has expired.
commit ee0c342af5f47924329a1e7808446c55da26900b
5 changes: 4 additions & 1 deletion atmos_model.F90
@@ -975,7 +975,10 @@ subroutine atmos_model_end (Atmos)
endif

! Fast physics (from dynamics) are finalized in atmosphere_end above;
! standard/slow physics (from CCPP) are finalized in CCPP_step 'finalize'.
! standard/slow physics (from CCPP) are finalized in CCPP_step 'physics_finalize'.
call CCPP_step (step="physics_finalize", nblks=Atm_block%nblks, ierr=ierr)
if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP physics_finalize step failed')

! The CCPP framework for all cdata structures is finalized in CCPP_step 'finalize'.
call CCPP_step (step="finalize", nblks=Atm_block%nblks, ierr=ierr)
if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP finalize step failed')
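As context for this hunk, a hedged sketch of what each of the two steps now does on the driver side, paraphrased from the CCPP_driver.F90 changes further down in this commit rather than quoted verbatim:

! Sketch only: paraphrased from the CCPP_driver.F90 hunks below in this commit.
if (trim(step) == "physics_finalize") then
   ! One domain-wide call; threading is allowed inside the physics finalize routines.
   GFS_control%nthreads = nthrds
   call ccpp_physics_finalize(cdata_domain, suite_name=trim(ccpp_suite), ierr=ierr)
else if (trim(step) == "finalize") then
   ! Framework cleanup only: release the per-block/per-thread cdata structures.
   if (allocated(cdata_block)) deallocate(cdata_block)
end if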
13 changes: 0 additions & 13 deletions ccpp/CMakeLists.txt
@@ -13,10 +13,6 @@ if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "Coverage")
endif()

#------------------------------------------------------------------------------
# CMake Modules
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/framework/cmake")

#------------------------------------------------------------------------------
# Call to CCPP code generator
if(DEBUG)
@@ -54,14 +50,6 @@ if(MPI)
add_definitions(-DMPI)
endif()

#------------------------------------------------------------------------------
# Set additional flags for debug build
if(DEBUG)
if(CMAKE_Fortran_COMPILER_ID MATCHES "Intel")
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -init=snan,arrays")
endif()
endif()

#------------------------------------------------------------------------------
# Set flag for 32bit dynamics build
if(32BIT)
@@ -117,7 +105,6 @@ add_library(
# Compile GFS_diagnostics.F90 without optimization, this leads to out of memory errors on wcoss_dell_p3
set_property(SOURCE driver/GFS_diagnostics.F90 APPEND_STRING PROPERTY COMPILE_FLAGS "-O0")


target_link_libraries(fv3ccpp PUBLIC ccpp_framework)
target_link_libraries(fv3ccpp PUBLIC ccpp_physics)

3 changes: 2 additions & 1 deletion ccpp/config/ccpp_prebuild_config.py
@@ -214,7 +214,7 @@

# Default build dir, relative to current working directory,
# if not specified as command-line argument
DEFAULT_BUILD_DIR = 'FV3'
DEFAULT_BUILD_DIR = 'build'

# Auto-generated makefile/cmakefile snippets that contain all type definitions
TYPEDEFS_MAKEFILE = '{build_dir}/physics/CCPP_TYPEDEFS.mk'
@@ -242,6 +242,7 @@
STATIC_API_SRCFILE = '{build_dir}/physics/CCPP_STATIC_API.sh'

# Directory for writing HTML pages generated from metadata files
# used by metadata2html.py for generating scientific documentation
METADATA_HTML_OUTPUT_DIR = '{build_dir}/physics/physics/docs'

# HTML document containing the model-defined CCPP variables
4 changes: 2 additions & 2 deletions ccpp/data/GFS_typedefs.F90
@@ -22,8 +22,8 @@ module GFS_typedefs

implicit none

! To ensure that these values match what's in the physics,
! array sizes are compared during model init in GFS_rrtmg_setup_init()
! To ensure that these values match what's in the physics, array
! sizes are compared in the auto-generated physics caps in debug mode
private :: NF_AESW, NF_AELW, NSPC, NSPC1, NF_CLDS, NF_VGAS, NF_ALBD, ntrcaerm
! from module_radiation_aerosols
integer, parameter :: NF_AESW = 3
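The new comment refers to consistency checks that the auto-generated physics caps perform when the model is built in debug mode. As a purely hypothetical illustration of that kind of check (the subroutine and variable names below are invented; the real checks are generated by ccpp_prebuild and differ in detail), the idea is to compare a host-side dimension parameter such as NF_AESW against the actual extent of the corresponding array:

! Hypothetical illustration only: names are invented, and the real checks are
! emitted by ccpp_prebuild into the debug-mode physics caps.
subroutine check_last_dim(field, expected, name, errmsg, errflg)
   real,             intent(in)  :: field(:,:,:,:)
   integer,          intent(in)  :: expected      ! e.g. the host-side value of NF_AESW
   character(len=*), intent(in)  :: name
   character(len=*), intent(out) :: errmsg
   integer,          intent(out) :: errflg
   errflg = 0
   errmsg = ''
   if (size(field, dim=4) /= expected) then
      write(errmsg,'(4a,i0,a,i0)') 'Dimension mismatch for ', trim(name), ': ', &
           'expected ', expected, ', got ', size(field, dim=4)
      errflg = 1
   end if
end subroutine check_last_dim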
48 changes: 22 additions & 26 deletions ccpp/driver/CCPP_driver.F90
@@ -101,8 +101,8 @@ subroutine CCPP_step (step, nblks, ierr)
else if (trim(step)=="physics_init") then

! Since the physics init step is independent of the blocking structure,
! we can use cdata_domain here. Since we don't use threading on the outside,
! we can allow threading inside the physics init routines.
! we can use cdata_domain. And since we don't use threading on the host
! model side, we can allow threading inside the physics init routines.
GFS_control%nthreads = nthrds

call ccpp_physics_init(cdata_domain, suite_name=trim(ccpp_suite), ierr=ierr)
@@ -116,8 +116,8 @@ subroutine CCPP_step (step, nblks, ierr)
else if (trim(step)=="timestep_init") then

! Since the physics timestep init step is independent of the blocking structure,
! we can use cdata_domain here. Since we don't use threading on the outside,
! we can allow threading inside the timestep init (time_vary) routines.
! we can use cdata_domain. And since we don't use threading on the host
! model side, we can allow threading inside the timestep init (time_vary) routines.
GFS_control%nthreads = nthrds

call ccpp_physics_timestep_init(cdata_domain, suite_name=trim(ccpp_suite), group_name="time_vary", ierr=ierr)
@@ -159,11 +159,11 @@ subroutine CCPP_step (step, nblks, ierr)
! *DH 20210104 !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

! Radiation and stochastic physics
! Radiation, physics and stochastic physics - threaded regions using blocked data structures
else if (trim(step)=="radiation" .or. trim(step)=="physics" .or. trim(step)=="stochastics") then

! Set number of threads available to physics schemes to one,
! because threads are used on the outside for blocking
! because threads are used on the host model side for blocking
GFS_control%nthreads = 1

!$OMP parallel num_threads (nthrds) &
@@ -188,8 +188,8 @@ subroutine CCPP_step (step, nblks, ierr)
call ccpp_physics_run(cdata_block(nb,ntX), suite_name=trim(ccpp_suite), group_name=trim(step), ierr=ierr2)
if (ierr2/=0) then
write(0,'(2a,3(a,i4),a)') "An error occurred in ccpp_physics_run for group ", trim(step), &
", block ", nb, " and thread ", nt, " (ntX=", ntX, ")"
write(0,'(a)') trim(cdata_block(nb,nt)%errmsg)
", block ", nb, " and thread ", nt, " (ntX=", ntX, "):"
write(0,'(a)') trim(cdata_block(nb,ntX)%errmsg)
ierr = ierr + ierr2
end if
end do
@@ -202,7 +202,7 @@ subroutine CCPP_step (step, nblks, ierr)
else if (trim(step)=="timestep_finalize") then

! Since the physics timestep finalize step is independent of the blocking structure,
! we can use cdata_domain here. Since we don't use threading on the outside,
! we can use cdata_domain. And since we don't use threading on the host model side,
! we can allow threading inside the timestep finalize (time_vary) routines.
GFS_control%nthreads = nthrds

@@ -213,27 +213,23 @@ subroutine CCPP_step (step, nblks, ierr)
return
end if

! Finalize
else if (trim(step)=="finalize") then
! Physics finalize
else if (trim(step)=="physics_finalize") then

! Loop over blocks, don't use threading on the outside but allowing threading
! inside the finalization, similar to what is done for the initialization
! Since the physics finalize step is independent of the blocking structure,
! we can use cdata_domain. And since we don't use threading on the host
! model side, we can allow threading inside the physics finalize routines.
GFS_control%nthreads = nthrds

! Fast physics are finalized in atmosphere_end, loop over
! all blocks and threads to finalize all other physics
do nt=1,nthrdsX
do nb=1,nblks
!--- Finalize CCPP physics
call ccpp_physics_finalize(cdata_block(nb,nt), suite_name=trim(ccpp_suite), ierr=ierr)
if (ierr/=0) then
write(0,'(a,i4,a,i4)') "An error occurred in ccpp_physics_finalize for block ", nb, " and thread ", nt
write(0,'(a)') trim(cdata_block(nb,nt)%errmsg)
return
end if
end do
end do
call ccpp_physics_finalize(cdata_domain, suite_name=trim(ccpp_suite), ierr=ierr)
if (ierr/=0) then
write(0,'(a)') "An error occurred in ccpp_physics_finalize"
write(0,'(a)') trim(cdata_domain%errmsg)
return
end if

! Finalize
else if (trim(step)=="finalize") then
! Deallocate cdata structure for blocks and threads
if (allocated(cdata_block)) deallocate(cdata_block)
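The comments in this file describe two execution patterns: domain-wide steps (physics_init, timestep_init, timestep_finalize, and now physics_finalize) are called once on cdata_domain with threading permitted inside the physics, while the radiation/physics/stochastics steps are threaded over blocks on the host model side, so the physics-internal thread count is set to one. A condensed, paraphrased sketch of the two patterns follows; the variables are those declared in CCPP_driver.F90, and the per-thread block index ntX used in the real code is omitted here:

! (1) Domain-wide steps: no host-side threading, so let the physics thread internally.
GFS_control%nthreads = nthrds
call ccpp_physics_init(cdata_domain, suite_name=trim(ccpp_suite), ierr=ierr)

! (2) Blocked steps (radiation/physics/stochastics): the host threads over blocks,
!     so each physics call runs single-threaded on its own block of data.
GFS_control%nthreads = 1
!$OMP parallel do num_threads (nthrds) private (nb, ierr2) reduction (+:ierr)
do nb = 1, nblks
   call ccpp_physics_run(cdata_block(nb,1), suite_name=trim(ccpp_suite), &
                         group_name=trim(step), ierr=ierr2)
   if (ierr2 /= 0) ierr = ierr + ierr2
end do
!$OMP end parallel do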

2 changes: 1 addition & 1 deletion ccpp/framework
2 changes: 1 addition & 1 deletion ccpp/physics
Submodule physics updated 220 files
96 changes: 96 additions & 0 deletions ccpp/suites/suite_FV3_GFS_v17_coupled_p8.xml
@@ -0,0 +1,96 @@
<?xml version="1.0" encoding="UTF-8"?>

<suite name="FV3_GFS_v17_coupled_p8" version="1">
<!-- <init></init> -->
<group name="time_vary">
<subcycle loop="1">
<scheme>GFS_time_vary_pre</scheme>
<scheme>GFS_rrtmg_setup</scheme>
<scheme>GFS_rad_time_vary</scheme>
<scheme>GFS_phys_time_vary</scheme>
</subcycle>
</group>
<group name="radiation">
<subcycle loop="1">
<scheme>GFS_suite_interstitial_rad_reset</scheme>
<scheme>GFS_rrtmg_pre</scheme>
<scheme>GFS_radiation_surface</scheme>
<scheme>rrtmg_sw_pre</scheme>
<scheme>rrtmg_sw</scheme>
<scheme>rrtmg_sw_post</scheme>
<scheme>rrtmg_lw_pre</scheme>
<scheme>rrtmg_lw</scheme>
<scheme>rrtmg_lw_post</scheme>
<scheme>GFS_rrtmg_post</scheme>
</subcycle>
</group>
<group name="physics">
<subcycle loop="1">
<scheme>GFS_suite_interstitial_phys_reset</scheme>
<scheme>GFS_suite_stateout_reset</scheme>
<scheme>get_prs_fv3</scheme>
<scheme>GFS_suite_interstitial_1</scheme>
<scheme>GFS_surface_generic_pre</scheme>
<scheme>GFS_surface_composites_pre</scheme>
<scheme>dcyc2t3</scheme>
<scheme>GFS_surface_composites_inter</scheme>
<scheme>GFS_suite_interstitial_2</scheme>
</subcycle>
<!-- Surface iteration loop -->
<subcycle loop="2">
<scheme>sfc_diff</scheme>
<scheme>GFS_surface_loop_control_part1</scheme>
<scheme>sfc_nst_pre</scheme>
<scheme>sfc_nst</scheme>
<scheme>sfc_nst_post</scheme>
<scheme>noahmpdrv</scheme>
<scheme>sfc_cice</scheme>
<scheme>sfc_sice</scheme>
<scheme>GFS_surface_loop_control_part2</scheme>
</subcycle>
<!-- End of surface iteration loop -->
<subcycle loop="1">
<scheme>GFS_surface_composites_post</scheme>
<scheme>sfc_diag</scheme>
<scheme>sfc_diag_post</scheme>
<scheme>GFS_surface_generic_post</scheme>
<scheme>GFS_PBL_generic_pre</scheme>
<scheme>satmedmfvdifq</scheme>
<scheme>GFS_PBL_generic_post</scheme>
<scheme>GFS_GWD_generic_pre</scheme>
<scheme>unified_ugwp</scheme>
<scheme>unified_ugwp_post</scheme>
<scheme>GFS_GWD_generic_post</scheme>
<scheme>GFS_suite_stateout_update</scheme>
<scheme>ozphys_2015</scheme>
<scheme>h2ophys</scheme>
<scheme>get_phi_fv3</scheme>
<scheme>GFS_suite_interstitial_3</scheme>
<scheme>GFS_DCNV_generic_pre</scheme>
<scheme>samfdeepcnv</scheme>
<scheme>GFS_DCNV_generic_post</scheme>
<scheme>GFS_SCNV_generic_pre</scheme>
<scheme>samfshalcnv</scheme>
<scheme>GFS_SCNV_generic_post</scheme>
<scheme>GFS_suite_interstitial_4</scheme>
<scheme>cnvc90</scheme>
<scheme>GFS_MP_generic_pre</scheme>
<scheme>mp_thompson_pre</scheme>
</subcycle>
<subcycle loop="1">
<scheme>mp_thompson</scheme>
</subcycle>
<subcycle loop="1">
<scheme>mp_thompson_post</scheme>
<scheme>GFS_MP_generic_post</scheme>
<scheme>maximum_hourly_diagnostics</scheme>
</subcycle>
</group>
<group name="stochastics">
<subcycle loop="1">
<scheme>GFS_stochastics</scheme>
<scheme>phys_tend</scheme>
</subcycle>
</group>
<!-- <finalize></finalize> -->
</suite>
95 changes: 95 additions & 0 deletions ccpp/suites/suite_FV3_GFS_v17_p8.xml
@@ -0,0 +1,95 @@
<?xml version="1.0" encoding="UTF-8"?>

<suite name="FV3_GFS_v17_p8" version="1">
<!-- <init></init> -->
<group name="time_vary">
<subcycle loop="1">
<scheme>GFS_time_vary_pre</scheme>
<scheme>GFS_rrtmg_setup</scheme>
<scheme>GFS_rad_time_vary</scheme>
<scheme>GFS_phys_time_vary</scheme>
</subcycle>
</group>
<group name="radiation">
<subcycle loop="1">
<scheme>GFS_suite_interstitial_rad_reset</scheme>
<scheme>GFS_rrtmg_pre</scheme>
<scheme>GFS_radiation_surface</scheme>
<scheme>rrtmg_sw_pre</scheme>
<scheme>rrtmg_sw</scheme>
<scheme>rrtmg_sw_post</scheme>
<scheme>rrtmg_lw_pre</scheme>
<scheme>rrtmg_lw</scheme>
<scheme>rrtmg_lw_post</scheme>
<scheme>GFS_rrtmg_post</scheme>
</subcycle>
</group>
<group name="physics">
<subcycle loop="1">
<scheme>GFS_suite_interstitial_phys_reset</scheme>
<scheme>GFS_suite_stateout_reset</scheme>
<scheme>get_prs_fv3</scheme>
<scheme>GFS_suite_interstitial_1</scheme>
<scheme>GFS_surface_generic_pre</scheme>
<scheme>GFS_surface_composites_pre</scheme>
<scheme>dcyc2t3</scheme>
<scheme>GFS_surface_composites_inter</scheme>
<scheme>GFS_suite_interstitial_2</scheme>
</subcycle>
<!-- Surface iteration loop -->
<subcycle loop="2">
<scheme>sfc_diff</scheme>
<scheme>GFS_surface_loop_control_part1</scheme>
<scheme>sfc_nst_pre</scheme>
<scheme>sfc_nst</scheme>
<scheme>sfc_nst_post</scheme>
<scheme>noahmpdrv</scheme>
<scheme>sfc_sice</scheme>
<scheme>GFS_surface_loop_control_part2</scheme>
</subcycle>
<!-- End of surface iteration loop -->
<subcycle loop="1">
<scheme>GFS_surface_composites_post</scheme>
<scheme>sfc_diag</scheme>
<scheme>sfc_diag_post</scheme>
<scheme>GFS_surface_generic_post</scheme>
<scheme>GFS_PBL_generic_pre</scheme>
<scheme>satmedmfvdifq</scheme>
<scheme>GFS_PBL_generic_post</scheme>
<scheme>GFS_GWD_generic_pre</scheme>
<scheme>unified_ugwp</scheme>
<scheme>unified_ugwp_post</scheme>
<scheme>GFS_GWD_generic_post</scheme>
<scheme>GFS_suite_stateout_update</scheme>
<scheme>ozphys_2015</scheme>
<scheme>h2ophys</scheme>
<scheme>get_phi_fv3</scheme>
<scheme>GFS_suite_interstitial_3</scheme>
<scheme>GFS_DCNV_generic_pre</scheme>
<scheme>samfdeepcnv</scheme>
<scheme>GFS_DCNV_generic_post</scheme>
<scheme>GFS_SCNV_generic_pre</scheme>
<scheme>samfshalcnv</scheme>
<scheme>GFS_SCNV_generic_post</scheme>
<scheme>GFS_suite_interstitial_4</scheme>
<scheme>cnvc90</scheme>
<scheme>GFS_MP_generic_pre</scheme>
<scheme>mp_thompson_pre</scheme>
</subcycle>
<subcycle loop="1">
<scheme>mp_thompson</scheme>
</subcycle>
<subcycle loop="1">
<scheme>mp_thompson_post</scheme>
<scheme>GFS_MP_generic_post</scheme>
<scheme>maximum_hourly_diagnostics</scheme>
</subcycle>
</group>
<group name="stochastics">
<subcycle loop="1">
<scheme>GFS_stochastics</scheme>
<scheme>phys_tend</scheme>
</subcycle>
</group>
<!-- <finalize></finalize> -->
</suite>
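The group names in these two suite definition files ("time_vary", "radiation", "physics", "stochastics") are the same group_name values the driver passes to ccpp_physics_timestep_init and ccpp_physics_run, so the host decides which part of the suite executes at each point in the time loop. A hedged sketch of one forecast step from the host side, using only the CCPP_step entry points that appear in this commit (error handling trimmed; the actual call sites live in the FV3 atmosphere driver code):

! Sketch of one model time step as driven by the host; error handling trimmed.
! The step/group names match the <group name="..."> entries in the suite XML above.
call CCPP_step (step="timestep_init",     nblks=Atm_block%nblks, ierr=ierr)  ! runs group "time_vary"
call CCPP_step (step="radiation",         nblks=Atm_block%nblks, ierr=ierr)
call CCPP_step (step="physics",           nblks=Atm_block%nblks, ierr=ierr)
call CCPP_step (step="stochastics",       nblks=Atm_block%nblks, ierr=ierr)
call CCPP_step (step="timestep_finalize", nblks=Atm_block%nblks, ierr=ierr)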
You are viewing a condensed version of this merge commit.