diff --git a/cicecore/cicedyn/dynamics/ice_dyn_core1d.F90 b/cicecore/cicedyn/dynamics/ice_dyn_core1d.F90 index 6cbfbed11..f3f71b490 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_core1d.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_core1d.F90 @@ -82,7 +82,7 @@ subroutine stress_1d (ee, ne, se, lb, ub, & use ice_dyn_shared, only: arlx1i, denom1, revp, & deltaminEVP, visc_replpress - ! + ! implicit none ! arguments ------------------------------------------------------------------ integer (kind=int_kind), intent(in) :: lb,ub @@ -274,7 +274,7 @@ subroutine stress_1d (ee, ne, se, lb, ub, & ssigpw = stressp_2(iw) + stressp_3(iw) ssigp1 =(stressp_1(iw) + stressp_3(iw))*p055 ssigp2 =(stressp_2(iw) + stressp_4(iw))*p055 - + ssigmn = stressm_1(iw) + stressm_2(iw) ssigms = stressm_3(iw) + stressm_4(iw) ssigme = stressm_1(iw) + stressm_4(iw) @@ -396,7 +396,7 @@ subroutine strain_rates_1d (tmp_uvel_cc, tmp_vvel_cc, & real (kind=dbl_kind), intent(in) :: & tmp_uvel_ee, tmp_vvel_ee, tmp_uvel_se, tmp_vvel_se, & - tmp_uvel_cc, tmp_vvel_cc, tmp_uvel_ne, tmp_vvel_ne + tmp_uvel_cc, tmp_vvel_cc, tmp_uvel_ne, tmp_vvel_ne real (kind=dbl_kind), intent(in) :: & dxT , & ! width of T-cell through the middle (m) @@ -405,7 +405,7 @@ subroutine strain_rates_1d (tmp_uvel_cc, tmp_vvel_cc, & cxp , & ! 1.5*HTN - 0.5*HTS cym , & ! 0.5*HTE - 1.5*HTW cxm ! 0.5*HTN - 1.5*HTS - + real (kind=dbl_kind), intent(out):: & ! at each corner : divune, divunw, divuse, divusw , & ! divergence tensionne, tensionnw, tensionse, tensionsw, & ! tension @@ -479,7 +479,7 @@ subroutine stepu_1d (lb , ub , & uarear , & uvel_init, vvel_init, & uvel , vvel , & - str1 , str2 , & + str1 , str2 , & str3 , str4 , & str5 , str6 , & str7 , str8 , & @@ -569,7 +569,7 @@ subroutine stepu_1d (lb , ub , & ! revp = 0 for classic evp, 1 for revised evp cca = (brlx + revp)*umassdti(iw) + vrel * cosw + Cb(iw) ! kg/m^2 s ccb = fm(iw) + sign(c1,fm(iw)) * vrel * sinw ! kg/m^2 s - + ab2 = cca**2 + ccb**2 tmp_str2_nw = str2(nw(iw)) @@ -578,7 +578,7 @@ subroutine stepu_1d (lb , ub , & tmp_str6_sse = str6(sse(iw)) tmp_str7_nw = str7(nw(iw)) tmp_str8_sw = str8(sw(iw)) - + ! divergence of the internal stress tensor strintx = uarear(iw)*(str1(iw)+tmp_str2_nw+tmp_str3_sse+tmp_str4_sw) strinty = uarear(iw)*(str5(iw)+tmp_str6_sse+tmp_str7_nw+tmp_str8_sw) @@ -590,7 +590,7 @@ subroutine stepu_1d (lb , ub , & + umassdti(iw)*(brlx*vold + revp*vvel_init(iw)) uvel(iw) = (cca*cc1 + ccb*cc2) / ab2 ! m/s vvel(iw) = (cca*cc2 - ccb*cc1) / ab2 - + ! calculate seabed stress component for outputs ! only needed on last iteration. enddo diff --git a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 index 3ee5d2a53..3b44aea09 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 @@ -134,9 +134,7 @@ subroutine init_evp call init_dyn_shared(dt_dyn) if (evp_algorithm == "shared_mem_1d" ) then - call dyn_evp1d_init(nx_global , ny_global, nx_block, ny_block, & - max_blocks, nghost , dyT , dxT , & - uarear , tmask , G_HTE , G_HTN) + call dyn_evp1d_init endif allocate( uocnU (nx_block,ny_block,max_blocks), & ! i ocean current (m/s) @@ -203,6 +201,7 @@ subroutine init_evp end subroutine init_evp +!======================================================================= #ifdef CICE_IN_NEMO ! Wind stress is set during this routine from the values supplied ! via NEMO (unless calc_strair is true). 
These values are supplied @@ -254,7 +253,7 @@ subroutine evp (dt) strain_rates_U, & iceTmask, iceUmask, iceEmask, iceNmask, & dyn_haloUpdate, fld2, fld3, fld4 - use ice_dyn_evp1d, only: dyn_evp1d_run + use ice_dyn_evp1d, only: dyn_evp1d_run real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -801,20 +800,20 @@ subroutine evp (dt) if (grid_ice == "B") then - if (evp_algorithm == "shared_mem_1d" ) then + if (evp_algorithm == "shared_mem_1d" ) then - call dyn_evp1d_run(stressp_1 , stressp_2, stressp_3 , stressp_4 , & - stressm_1 , stressm_2 , stressm_3 , stressm_4 , & - stress12_1, stress12_2, stress12_3, stress12_4, & - strength , & - cdn_ocnU , aiu , uocnU , vocnU , & - waterxU , wateryU , forcexU , forceyU , & - umassdti , fmU , strintxU , strintyU , & - Tbu , taubxU , taubyU , uvel , & - vvel , icetmask , iceUmask) + call dyn_evp1d_run(stressp_1 , stressp_2, stressp_3 , stressp_4 , & + stressm_1 , stressm_2 , stressm_3 , stressm_4 , & + stress12_1, stress12_2, stress12_3, stress12_4, & + strength , & + cdn_ocnU , aiu , uocnU , vocnU , & + waterxU , wateryU , forcexU , forceyU , & + umassdti , fmU , strintxU , strintyU , & + Tbu , taubxU , taubyU , uvel , & + vvel , icetmask , iceUmask) - else ! evp_algorithm == standard_2d (Standard CICE) - do ksub = 1,ndte ! subcycling + else ! evp_algorithm == standard_2d (Standard CICE) + do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) do iblk = 1, nblocks @@ -868,26 +867,26 @@ subroutine evp (dt) uvel, vvel) enddo ! sub cycling - endif ! evp algorithm + endif ! evp algorithm - !----------------------------------------------------------------- - ! save quantities for mechanical redistribution - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! save quantities for mechanical redistribution + !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call deformations (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - cxp (:,:,iblk), cyp (:,:,iblk), & - cxm (:,:,iblk), cym (:,:,iblk), & - tarear (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) - enddo - !$OMP END PARALLEL DO + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call deformations (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + cxp (:,:,iblk), cyp (:,:,iblk), & + cxm (:,:,iblk), cym (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) + enddo + !$OMP END PARALLEL DO elseif (grid_ice == "C") then @@ -1079,8 +1078,8 @@ subroutine evp (dt) do ksub = 1,ndte ! subcycling - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks call stressCD_T (nx_block , ny_block , & icellT (iblk), & indxTi (:,iblk), indxTj (:,iblk), & @@ -1094,27 +1093,27 @@ subroutine evp (dt) stresspT (:,:,iblk), stressmT (:,:,iblk), & stress12T (:,:,iblk) ) - enddo - !$OMP END PARALLEL DO + enddo + !$OMP END PARALLEL DO - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_center, field_type_scalar, & - zetax2T, etax2T) + ! 
calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_center, field_type_scalar, & + zetax2T, etax2T) - if (visc_method == 'avg_strength') then + if (visc_method == 'avg_strength') then call grid_average_X2Y('S', strength, 'T', strengthU, 'U') - elseif (visc_method == 'avg_zeta') then + elseif (visc_method == 'avg_zeta') then call grid_average_X2Y('S', zetax2T , 'T', zetax2U , 'U') call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') - endif - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - !----------------------------------------------------------------- - ! strain rates at U point - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- + endif + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + !----------------------------------------------------------------- + ! strain rates at U point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- call strain_rates_U (nx_block , ny_block , & icellU (iblk), & indxUi (:,iblk), indxUj (:,iblk), & diff --git a/cicecore/cicedyn/dynamics/ice_dyn_evp1d.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp1d.F90 index db0819fbb..1b7aaa267 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_evp1d.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_evp1d.F90 @@ -5,14 +5,17 @@ ! FIXME: For now it allocates all water point, which in most cases could be avoided. !=============================================================================== ! Created by Till Rasmussen (DMI), Mads Hvid Ribergaard (DMI), and Jacob W. Poulsen, Intel + module ice_dyn_evp1d + !- modules ------------------------------------------------------------------- use ice_kinds_mod + use ice_blocks, only: nx_block, ny_block, nghost use ice_constants use ice_communicate, only: my_task, master_task + use ice_domain_size, only: max_blocks, nx_global, ny_global use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice - ! none so far ... !- directives ---------------------------------------------------------------- implicit none @@ -24,15 +27,12 @@ module ice_dyn_evp1d !- private routines ---------------------------------------------------------- !- private vars -------------------------------------------------------------- - ! nx and ny are module variables for arrays after gather (G_*) Dimension according to CICE is nx_global+2*nghost, - ! ny_global+2*nghost + ! nx and ny are module variables for arrays after gather (G_*) Dimension according to CICE is + ! nx_global+2*nghost, ny_global+2*nghost ! nactive are number of active points (both t and u). navel is number of active integer(kind=int_kind), save :: nx, ny, nActive, navel, nallocated - ! cice variables imported - integer(kind=int_kind), save :: nghost, nx_block, ny_block, max_block - - ! indexes + ! indexes integer(kind=int_kind), allocatable, dimension(:,:) :: iwidx logical(kind=log_kind), allocatable, dimension(:) :: skipTcell,skipUcell real (kind=dbl_kind), allocatable, dimension(:) :: HTE,HTN, HTEm1,HTNm1 @@ -53,87 +53,68 @@ module ice_dyn_evp1d halo_parent_outer_east , halo_parent_outer_west , & halo_parent_outer_north, halo_parent_outer_south, & halo_inner_east , halo_inner_west , & - halo_inner_north , halo_inner_south + halo_inner_north , halo_inner_south ! 
number of halo points (same for inner and outer) integer(kind=int_kind) :: & n_inner_east, n_inner_west, n_inner_north, n_inner_south - + +!============================================================================= contains +!============================================================================= +! module public subroutines +! In addition all water points are assumed to be active and allocated thereafter. +!============================================================================= - !============================================================================= - ! module public subroutines + subroutine dyn_evp1d_init - !============================================================================= - ! This follows CICE naming - ! In addition all water points are assumed to be active and allocated thereafter. - subroutine dyn_evp1d_init(nx_global, ny_global, cnx_block, cny_block, & - cmax_block, cnghost , & - L_dyT, L_dxT, L_uarear, L_tmask, G_HTE, G_HTN) + use ice_grid, only: dyT, dxT, uarear, tmask, G_HTE, G_HTN - ! Note that TMask is ocean/land implicit none - integer(kind=int_kind), intent(in) :: & - nx_global, ny_global, cnx_block, cny_block, cmax_block, cnghost - real(kind=dbl_kind) , dimension(:,:,:), intent(in) :: & - L_dyT, L_dxT, L_uarear - logical(kind=log_kind), dimension(:,:,:), intent(in) :: & - L_tmask + ! local variables - ! variables for Global arrays (domain size) - real(kind=dbl_kind) , dimension(:,:) , intent(inout), allocatable, optional :: G_HTE,G_HTN - real(kind=dbl_kind) , dimension(:,:) , allocatable :: G_dyT, G_dxT, G_uarear - logical(kind=log_kind), dimension(:,:) , allocatable :: G_tmask + real(kind=dbl_kind) , allocatable, dimension(:,:) :: G_dyT, G_dxT, G_uarear + logical(kind=log_kind), allocatable, dimension(:,:) :: G_tmask - character(len=*), parameter :: subname = '(dyn_evp1d_init)' - integer(kind=int_kind) :: ios, ierr + integer(kind=int_kind) :: ios, ierr - nx_block=cnx_block - ny_block=cny_block - nghost=cnghost - max_block=cmax_block + character(len=*), parameter :: subname = '(dyn_evp1d_init)' nx=nx_global+2*nghost ny=ny_global+2*nghost allocate(G_dyT(nx,ny),G_dxT(nx,ny),G_uarear(nx,ny),G_tmask(nx,ny),stat=ierr) - if (ierr/=0) then call abort_ice(subname//' ERROR: allocating', file=__FILE__, line=__LINE__) endif ! gather from blks to global - call gather_static(L_uarear, L_dxT, L_dyT, L_Tmask, & - G_uarear, G_dxT, G_dyT, G_Tmask) + call gather_static( uarear, dxT, dyT, tmask, & + G_uarear, G_dxT, G_dyT, G_tmask) ! calculate number of water points (T and U). Only needed for the static version - ! Tmask in ocean/ice + ! 
tmask in ocean/ice if (my_task == master_task) then - call halo_HTE_HTN(G_HTE) - call halo_HTE_HTN(G_HTN) - call calc_nActiveTU(G_Tmask,nActive) + call calc_nActiveTU(G_tmask,nActive) call evp1d_alloc_static_na(nActive) - call calc_2d_indices_init(nActive, G_Tmask) + call calc_2d_indices_init(nActive, G_tmask) call calc_navel(nActive, navel) call evp1d_alloc_static_navel(navel) call numainit(1,nActive,navel) call convert_2d_1d_init(nActive,G_HTE, G_HTN, G_uarear, G_dxT, G_dyT) call evp1d_alloc_static_halo() - deallocate(G_HTN,G_HTE,stat=ierr) - if (ierr/=0) then - call abort_ice(subname//' ERROR: deallocating', file=__FILE__, line=__LINE__) - endif endif - deallocate(G_dyT,G_dxT,G_uarear,G_tmask,stat=ierr) + deallocate(G_dyT,G_dxT,G_uarear,G_tmask,stat=ierr) if (ierr/=0) then call abort_ice(subname//' ERROR: deallocating', file=__FILE__, line=__LINE__) endif end subroutine dyn_evp1d_init - !============================================================================= +!============================================================================= + subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & L_stressm_1 , L_stressm_2 , L_stressm_3 , L_stressm_4 , & L_stress12_1, L_stress12_2, L_stress12_3, L_stress12_4, & @@ -153,21 +134,24 @@ subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , implicit none + ! nx_block, ny_block, max_blocks real(kind=dbl_kind) , dimension(:,:,:), intent(inout) :: & L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & L_stressm_1 , L_stressm_2 , L_stressm_3 , L_stressm_4 , & L_stress12_1, L_stress12_2, L_stress12_3, L_stress12_4, & L_strintxU , L_strintyU , L_uvel , L_vvel , & L_taubxU , L_taubyU - real(kind=dbl_kind) , dimension(:,:,:), intent(in) :: & L_strength , & L_cdn_ocn , L_aiu , L_uocn , L_vocn , & L_waterxU , L_wateryU , L_forcexU , L_forceyU, & - L_umassdti , L_fmU , L_Tbu + L_umassdti , L_fmU , L_Tbu + logical(kind=log_kind), dimension(:,:,:), intent(in) :: & + L_iceUmask , L_iceTmask - logical(kind=log_kind), dimension (:,:,:), intent(in) :: L_iceUmask, L_iceTmask + ! local variables + ! nx, ny real(kind=dbl_kind), dimension(nx,ny) :: & G_stressp_1 , G_stressp_2 , G_stressp_3 , G_stressp_4 , & G_stressm_1 , G_stressm_2 , G_stressm_3 , G_stressm_4 , & @@ -178,8 +162,8 @@ subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , G_umassdti , G_fmU , G_strintxU , G_strintyU , & G_Tbu , G_uvel , G_vvel , G_taubxU , & G_taubyU ! G_taubxU and G_taubyU are post processed from Cb - - logical(kind=log_kind), dimension (nx,ny) :: G_iceUmask, G_iceTmask + logical(kind=log_kind), dimension (nx,ny) :: & + G_iceUmask , G_iceTmask character(len=*), parameter :: subname = '(dyn_evp1d_run)' @@ -194,7 +178,7 @@ subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , L_strength, & L_cdn_ocn , L_aiu , L_uocn , L_vocn , & L_waterxU , L_wateryU , L_forcexU , L_forceyU , & - L_umassdti , L_fmU , & + L_umassdti , L_fmU , & L_Tbu , L_uvel , L_vvel , & L_icetmask , L_iceUmask , & G_stressp_1 , G_stressp_2 , G_stressp_3 , G_stressp_4 , & @@ -223,7 +207,7 @@ subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , call calc_halo_parent(Nactive,navel) ! map from cpu to gpu (to) and back. - ! This could be optimized considering which variables change from time step to time step + ! This could be optimized considering which variables change from time step to time step ! and which are constant. ! 
in addition initialization of Cb and str1, str2, str3, str4, str5, str6, str7, str8 call icepack_query_parameters(rhow_out=rhow) @@ -272,8 +256,8 @@ subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , call evp1d_halo_update() enddo ! This can be skipped if diagnostics of strintx and strinty is not needed - ! They will either both be calculated or not. - call calc_diag_1d(1 , nActive , & + ! They will either both be calculated or not. + call calc_diag_1d(1 , nActive , & uarear , skipUcell, & str1 , str2 , & str3 , str4 , & @@ -281,7 +265,7 @@ subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , str7 , str8 , & nw , sw , & sse , & - strintxU, strintyU) + strintxU, strintyU) call ice_timer_stop(timer_evp1dcore) @@ -306,7 +290,7 @@ subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , L_stressm_1 , L_stressm_2 , L_stressm_3 , L_stressm_4 , & L_stress12_1, L_stress12_2, L_stress12_3, L_stress12_4, & L_strintxU , L_strintyU , L_uvel , L_vvel , & - L_taubxU , L_taubyU , & + L_taubxU , L_taubyU , & G_stressp_1 , G_stressp_2 , G_stressp_3 , G_stressp_4 , & G_stressm_1 , G_stressm_2 , G_stressm_3 , G_stressm_4 , & G_stress12_1, G_stress12_2, G_stress12_3, G_stress12_4, & @@ -320,17 +304,19 @@ subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , ! call init_unionTU(nx, ny, iceTmask_log,iceUmask) ! else if (nactiveold < nActive) then ! write(nu_diag,*) 'Warning nActive is bigger than old allocation. Need to re allocate' - ! call evp_1d_dealloc() ! only deallocate if not first time step + ! call evp_1d_dealloc() ! only deallocate if not first time step ! call evp_1d_alloc(nActive, nActive,nx,ny) ! nactiveold=nActive+buf1d ! allocate ! call init_unionTU(nx, ny, iceTmask_log,iceUmask) ! endif - ! call cp_2dto1d(nActive) + ! call cp_2dto1d(nActive) ! FIXME THIS IS THE LOGIC FOR RE ALLOCATION IF NEEDED ! 
call add_1d(nx, ny, natmp, iceTmask_log, iceUmask, ts) + end subroutine dyn_evp1d_run - !============================================================================= +!============================================================================= + subroutine dyn_evp1d_finalize() implicit none @@ -342,7 +328,8 @@ subroutine dyn_evp1d_finalize() end subroutine dyn_evp1d_finalize - !============================================================================= +!============================================================================= + subroutine evp1d_alloc_static_na(na0) implicit none @@ -415,15 +402,16 @@ subroutine evp1d_alloc_static_na(na0) Tbu (1:na0), Cb (1:na0), & uvel_init(1:na0), vvel_init(1:na0), & stat=ierr) - + if (ierr/=0) then call abort_ice(subname//' ERROR: allocating', file=__FILE__, line=__LINE__) endif - end subroutine evp1d_alloc_static_na + end subroutine evp1d_alloc_static_na + +!============================================================================= - !============================================================================= - subroutine evp1d_alloc_static_navel(navel0) + subroutine evp1d_alloc_static_navel(navel0) implicit none integer(kind=int_kind), intent(in) :: navel0 @@ -442,7 +430,8 @@ subroutine evp1d_alloc_static_navel(navel0) end subroutine evp1d_alloc_static_navel - !============================================================================= +!============================================================================= + subroutine evp1d_alloc_static_halo() implicit none @@ -450,7 +439,7 @@ subroutine evp1d_alloc_static_halo() character(len=*), parameter :: subname = '(evp1d_alloc_static_halo)' ! allocation of arrays to use for halo - ! These are the size of one of the dimensions of the global grid but they could be + ! These are the size of one of the dimensions of the global grid but they could be ! reduced in size as only the number of active U points are used. ! Points to send data from are in the "inner" vectors. Data in outer points are named "outer" @@ -472,8 +461,10 @@ subroutine evp1d_alloc_static_halo() end subroutine evp1d_alloc_static_halo - !============================================================================= +!============================================================================= + subroutine calc_nActiveTU(Tmask,na0, Umask) + ! Calculate number of active points with a given mask. implicit none @@ -486,24 +477,26 @@ subroutine calc_nActiveTU(Tmask,na0, Umask) na0=0 if (present(Umask)) then do i=1+nghost,nx - do j=1+nghost,ny - if ((Tmask(i,j)) .or. (Umask(i,j))) then + do j=1+nghost,ny + if ((Tmask(i,j)) .or. (Umask(i,j))) then na0=na0+1 - endif - enddo + endif + enddo enddo else do i=1+nghost,nx - do j=1+nghost,ny - if (Tmask(i,j)) then + do j=1+nghost,ny + if (Tmask(i,j)) then na0=na0+1 - endif - enddo + endif + enddo enddo endif + end subroutine calc_nActiveTU - !============================================================================= +!============================================================================= + subroutine set_skipMe(iceTmask, iceUmask,na0) implicit none @@ -529,9 +522,11 @@ subroutine set_skipMe(iceTmask, iceUmask,na0) if (j == ny) skipUcell(iw)=.true. enddo ! write(nu_diag,*) 'number of points and Active points', na0, niw + end subroutine set_skipMe - !============================================================================= +!============================================================================= + subroutine calc_2d_indices_init(na0, Tmask) ! All points are active. 
Need to find neighbors. ! This should include de selection of u points. @@ -539,7 +534,9 @@ subroutine calc_2d_indices_init(na0, Tmask) implicit none integer(kind=int_kind), intent(in) :: na0 - logical(kind=log_kind), dimension(nx, ny), intent(in) :: Tmask + ! nx, ny + logical(kind=log_kind), dimension(:,:), intent(in) :: Tmask + ! local variables integer(kind=int_kind) :: i, j, Nmaskt @@ -550,17 +547,19 @@ subroutine calc_2d_indices_init(na0, Tmask) Nmaskt = 0 ! NOTE: T mask includes northern and eastern ghost cells do j = 1 + nghost, ny - do i = 1 + nghost, nx - if (Tmask(i,j)) then - Nmaskt = Nmaskt + 1 - indxti(Nmaskt) = i - indxtj(Nmaskt) = j - end if - end do + do i = 1 + nghost, nx + if (Tmask(i,j)) then + Nmaskt = Nmaskt + 1 + indxti(Nmaskt) = i + indxtj(Nmaskt) = j + end if + end do end do + end subroutine calc_2d_indices_init - !============================================================================= +!============================================================================= + subroutine union(x, y, xdim, ydim, xy, nxy) ! Find union (xy) of two sorted integer vectors (x and y), i.e. @@ -570,7 +569,9 @@ subroutine union(x, y, xdim, ydim, xy, nxy) integer(kind=int_kind), intent(in) :: x(1:xdim), y(1:ydim) integer(kind=int_kind), intent(out) :: xy(1:xdim + ydim) integer(kind=int_kind), intent(out) :: nxy + ! local variables + integer(kind=int_kind) :: i, j, k character(len=*), parameter :: subname = '(union)' @@ -604,22 +605,28 @@ subroutine union(x, y, xdim, ydim, xy, nxy) k = k + 1 enddo nxy = k - 1 + end subroutine union - !============================================================================= - subroutine gather_static(L_uarear, L_dxT, L_dyT,L_Tmask, G_uarear,G_dxT,G_dyT, G_Tmask) +!============================================================================= + + subroutine gather_static(L_uarear, L_dxT, L_dyT, L_Tmask, & + G_uarear, G_dxT, G_dyT, G_Tmask) + ! In standalone distrb_info is an integer. Not needed anyway use ice_communicate, only : master_task use ice_gather_scatter, only : gather_global_ext use ice_domain, only : distrb_info implicit none - real(kind=dbl_kind) , dimension(nx_block, ny_block, max_block), intent(in) :: & - L_uarear, L_dxT, L_dyT + ! nx_block, ny_block, max_blocks + real(kind=dbl_kind) , dimension(:,:,:), intent(in) :: L_uarear, L_dxT, L_dyT + logical(kind=log_kind), dimension(:,:,:), intent(in) :: L_Tmask + + ! nx, ny + real(kind=dbl_kind) , dimension(:,:), intent(out) :: G_uarear, G_dxT, G_dyT + logical(kind=log_kind), dimension(:,:), intent(out) :: G_Tmask - logical(kind=log_kind), dimension(nx_block, ny_block, max_block), intent(in) :: L_Tmask - real(kind=dbl_kind) , dimension(:, :), intent(out) :: G_uarear,G_dxT,G_dyT - logical(kind=log_kind), dimension(:, :), intent(out) :: G_Tmask character(len=*), parameter :: subname = '(gather_static)' ! 
copy from distributed I_* to G_* @@ -627,9 +634,11 @@ subroutine gather_static(L_uarear, L_dxT, L_dyT,L_Tmask, G_uarear,G_dxT,G_dyT, G call gather_global_ext(G_dxT , L_dxT , master_task, distrb_info) call gather_global_ext(G_dyT , L_dyT , master_task, distrb_info) call gather_global_ext(G_Tmask , L_Tmask , master_task, distrb_info) + end subroutine gather_static - !============================================================================= +!============================================================================= + subroutine gather_dyn(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & L_stressm_1 , L_stressm_2 , L_stressm_3 , L_stressm_4 , & L_stress12_1, L_stress12_2, L_stress12_3,L_stress12_4 , & @@ -654,30 +663,32 @@ subroutine gather_dyn(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & use ice_domain, only : distrb_info implicit none - real(kind=dbl_kind) , dimension(nx_block, ny_block, max_block), intent(in) :: & - L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4, & - L_stressm_1 , L_stressm_2 , L_stressm_3 , L_stressm_4, & - L_stress12_1, L_stress12_2, L_stress12_3,L_stress12_4, & - L_strength, & - L_cdn_ocn , L_aiu , L_uocn , L_vocn , & - L_waterxU , L_wateryU , L_forcexU , L_forceyU , & - L_umassdti , L_fmU , & + ! nx_block, ny_block, max_blocks + real(kind=dbl_kind) , dimension(:,:,:), intent(in) :: & + L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & + L_stressm_1 , L_stressm_2 , L_stressm_3 , L_stressm_4 , & + L_stress12_1, L_stress12_2, L_stress12_3, L_stress12_4, & + L_strength , & + L_cdn_ocn , L_aiu , L_uocn , L_vocn , & + L_waterxU , L_wateryU , L_forcexU , L_forceyU , & + L_umassdti , L_fmU , & L_Tbu , L_uvel , L_vvel + logical(kind=log_kind), dimension(:,:,:), intent(in) :: & + L_iceUmask , L_iceTmask - logical(kind=log_kind), dimension(nx_block, ny_block, max_block), intent(in) :: & - L_iceUmask, L_iceTmask - - real(kind=dbl_kind) , dimension(nx, ny) , intent(out) :: & - G_stressp_1 , G_stressp_2 , G_stressp_3 , G_stressp_4, & - G_stressm_1 , G_stressm_2 , G_stressm_3 , G_stressm_4, & - G_stress12_1, G_stress12_2, G_stress12_3,G_stress12_4, & - G_strength, & - G_cdn_ocn , G_aiu , G_uocn , G_vocn , & - G_waterxU , G_wateryU , G_forcexU , G_forceyU , & - G_umassdti , G_fmU , & + ! nx, ny + real(kind=dbl_kind) , dimension(:,:), intent(out) :: & + G_stressp_1 , G_stressp_2 , G_stressp_3 , G_stressp_4 , & + G_stressm_1 , G_stressm_2 , G_stressm_3 , G_stressm_4 , & + G_stress12_1, G_stress12_2, G_stress12_3, G_stress12_4, & + G_strength, & + G_cdn_ocn , G_aiu , G_uocn , G_vocn , & + G_waterxU , G_wateryU , G_forcexU , G_forceyU , & + G_umassdti , G_fmU , & G_Tbu , G_uvel , G_vvel + logical(kind=log_kind), dimension(:,:), intent(out) :: & + G_iceUmask , G_iceTmask - logical(kind=log_kind), dimension(nx, ny), intent(out) :: G_iceUmask, G_iceTmask character(len=*), parameter :: subname = '(gather_dyn)' ! 
copy from distributed I_* to G_* @@ -685,7 +696,7 @@ subroutine gather_dyn(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & call gather_global_ext(G_stressp_2 , L_stressp_2, master_task, distrb_info,c0) call gather_global_ext(G_stressp_3 , L_stressp_3, master_task, distrb_info,c0) call gather_global_ext(G_stressp_4 , L_stressp_4, master_task, distrb_info,c0) - + call gather_global_ext(G_stressm_1 , L_stressm_1, master_task, distrb_info,c0) call gather_global_ext(G_stressm_2 , L_stressm_2, master_task, distrb_info,c0) call gather_global_ext(G_stressm_3 , L_stressm_3, master_task, distrb_info,c0) @@ -718,7 +729,8 @@ subroutine gather_dyn(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & end subroutine gather_dyn - !============================================================================= +!============================================================================= + subroutine scatter_dyn(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & L_stressm_1 , L_stressm_2 , L_stressm_3 , L_stressm_4 , & L_stress12_1, L_stress12_2, L_stress12_3, L_stress12_4, & @@ -735,21 +747,24 @@ subroutine scatter_dyn(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & use ice_domain, only : distrb_info implicit none - real(kind=dbl_kind), dimension(nx_block, ny_block, max_block), intent(out) :: & - L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & - L_stressm_1 , L_stressm_2 , L_stressm_3 , L_stressm_4 , & - L_stress12_1, L_stress12_2, L_stress12_3, L_stress12_4, & - L_strintxU , L_strintyU , L_uvel , L_vvel , & + ! nx_block, ny_block, max_blocks + real(kind=dbl_kind), dimension(:,:,:), intent(out) :: & + L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & + L_stressm_1 , L_stressm_2 , L_stressm_3 , L_stressm_4 , & + L_stress12_1, L_stress12_2, L_stress12_3, L_stress12_4, & + L_strintxU , L_strintyU , L_uvel , L_vvel , & L_taubxU , L_taubyU - real(kind=dbl_kind), dimension(nx, ny), intent(in) :: & + ! nx, ny + real(kind=dbl_kind), dimension(:,:), intent(in) :: & G_stressp_1 , G_stressp_2 , G_stressp_3 , G_stressp_4 , & G_stressm_1 , G_stressm_2 , G_stressm_3 , G_stressm_4 , & G_stress12_1, G_stress12_2, G_stress12_3, G_stress12_4, & G_strintxU , G_strintyU , G_uvel , G_vvel , & G_taubxU , G_taubyU + character(len=*), parameter :: subname = '(scatter_dyn)' - + call scatter_global_ext(L_stressp_1, G_stressp_1, master_task, distrb_info) call scatter_global_ext(L_stressp_2, G_stressp_2, master_task, distrb_info) call scatter_global_ext(L_stressp_3, G_stressp_3, master_task, distrb_info) @@ -774,13 +789,15 @@ subroutine scatter_dyn(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , & end subroutine scatter_dyn - !============================================================================= +!============================================================================= + subroutine convert_2d_1d_init(na0, G_HTE, G_HTN, G_uarear, G_dxT, G_dyT) implicit none integer(kind=int_kind), intent(in) :: na0 real (kind=dbl_kind), dimension(:, :), intent(in) :: G_HTE, G_HTN, G_uarear, G_dxT, G_dyT + ! 
local variables integer(kind=int_kind) :: iw, lo, up, j, i @@ -849,9 +866,11 @@ subroutine convert_2d_1d_init(na0, G_HTE, G_HTN, G_uarear, G_dxT, G_dyT) HTEm1(iw) = G_HTE(i - 1, j) HTNm1(iw) = G_HTN(i, j - 1) end do + end subroutine convert_2d_1d_init - !============================================================================= +!============================================================================= + subroutine convert_2d_1d_dyn(na0 , & G_stressp_1 , G_stressp_2 , G_stressp_3 , G_stressp_4 , & G_stressm_1 , G_stressm_2 , G_stressm_3 , G_stressm_4 , & @@ -864,6 +883,8 @@ subroutine convert_2d_1d_dyn(na0 , implicit none integer(kind=int_kind), intent(in) :: na0 + + ! nx, ny real(kind=dbl_kind), dimension(:, :), intent(in) :: & G_stressp_1 , G_stressp_2 , G_stressp_3 , G_stressp_4, & G_stressm_1 , G_stressm_2 , G_stressm_3 , G_stressm_4, & @@ -909,7 +930,7 @@ subroutine convert_2d_1d_dyn(na0 , strintxU(iw) = C0 strintyU(iw) = C0 Tbu(iw) = G_Tbu(i, j) - Cb(iw) = c0 + Cb(iw) = c0 uvel(iw) = G_uvel(i,j) vvel(iw) = G_vvel(i,j) uvel_init(iw) = G_uvel(i,j) @@ -923,9 +944,11 @@ subroutine convert_2d_1d_dyn(na0 , uvel(iw)=G_uvel(i,j) vvel(iw)=G_vvel(i,j) end do + end subroutine convert_2d_1d_dyn - !============================================================================= +!============================================================================= + subroutine convert_1d_2d_dyn(na0 , navel0 , & G_stressp_1 , G_stressp_2 , G_stressp_3 , G_stressp_4 , & G_stressm_1 , G_stressm_2 , G_stressm_3 , G_stressm_4 , & @@ -940,6 +963,7 @@ subroutine convert_1d_2d_dyn(na0 , navel0 , implicit none integer(kind=int_kind), intent(in) :: na0, navel0 + ! nx, ny real(kind=dbl_kind), dimension(:, :), intent(inout) :: & G_stressp_1 , G_stressp_2 , G_stressp_3 , G_stressp_4 , & G_stressm_1 , G_stressm_2 , G_stressm_3 , G_stressm_4 , & @@ -980,16 +1004,18 @@ subroutine convert_1d_2d_dyn(na0 , navel0 , G_uvel(i,j) = uvel(iw) G_vvel(i,j) = vvel(iw) end do - + do iw=na0+1,navel0 j = int((indxTij(iw) - 1) / (nx)) + 1 i = indxTij(iw) - (j - 1) * nx G_uvel(i,j) = uvel(iw) G_vvel(i,j) = vvel(iw) - end do + end do + end subroutine convert_1d_2d_dyn - !======================================================================= +!======================================================================= + subroutine setdiff(x, y, lvecx, lvecy,xy, nxy) ! Find element (xy) of two sorted integer vectors (x and y) that ! are in x, but not in y, or in y, but not in x @@ -1037,9 +1063,11 @@ subroutine setdiff(x, y, lvecx, lvecy,xy, nxy) k = k + 1 end do nxy = k - 1 + end subroutine setdiff - !======================================================================= +!======================================================================= + subroutine findXinY(x, y, lvecx, lvecy, indx) ! Find indx vector so that x(1:na) = y(indx(1:na)) ! @@ -1085,7 +1113,8 @@ subroutine findXinY(x, y, lvecx, lvecy, indx) end subroutine findXinY - !======================================================================= +!======================================================================= + subroutine calc_navel(na0, navel0) ! 
Calculate number of active points, including halo points @@ -1127,10 +1156,11 @@ subroutine calc_navel(na0, navel0) call union(util1, Inw , i , na0, util2, j ) call union(util2, Isw , j , na0, util1, i ) call union(util1, Isse, i , na0, util2, navel0) - + end subroutine calc_navel - !======================================================================= +!======================================================================= + subroutine numainit(lo,up,uu) implicit none @@ -1202,9 +1232,11 @@ subroutine numainit(lo,up,uu) str8(iw)=c0 enddo !$omp end parallel do + end subroutine numainit - !======================================================================= +!======================================================================= + subroutine evp1d_halo_update() implicit none @@ -1234,7 +1266,8 @@ subroutine evp1d_halo_update() end subroutine evp1d_halo_update - !======================================================================= +!======================================================================= + subroutine calc_halo_parent(na0,navel0) ! splits the global domain in east and west boundary and find the inner (within) the domain and the outer (outside the domain) ! Implementation for circular boundaries. This means that mathes between the opposite directions must be found @@ -1251,7 +1284,7 @@ subroutine calc_halo_parent(na0,navel0) ! Indexes, Directions are east, weast, north and south ! This is done to reduce the search windows. ! Iw runs from 1 to navel and the one to keep in the end - ! Iw_inner_{direction} contains the indexes for + ! Iw_inner_{direction} contains the indexes for integer(kind=int_kind) :: & iw, n_outer_east, n_outer_west, n_outer_south, n_outer_north @@ -1259,8 +1292,8 @@ subroutine calc_halo_parent(na0,navel0) integer(kind=int_kind) :: i, j, ifind, jfind ! 2d index. ifind and jfind are points on the boundary integer(kind=int_kind), dimension(ny) :: & - halo_outer_east, halo_outer_west, & - ind_inner_west , ind_inner_east + halo_outer_east, halo_outer_west, & + ind_inner_west , ind_inner_east integer(kind=int_kind), dimension(nx) :: & halo_outer_south, halo_outer_north, & @@ -1429,41 +1462,7 @@ subroutine calc_halo_parent(na0,navel0) end subroutine calc_halo_parent - subroutine halo_HTE_HTN(ARRAY) - - ! This subroutine adds ghost cells to HTE and HTN - - use ice_domain, only: ew_boundary_type, ns_boundary_type - - real (kind=dbl_kind), dimension(:,:), intent(inout) :: & - ARRAY - - character(len=*), parameter :: & - subname = '(halo_HTE_HTN)' - - if (ns_boundary_type =='cyclic') then - array(:,1) = array(:,ny-1) - array(:,ny) = array(:,2) - elseif (ns_boundary_type == 'open') then - array(:,1) = array(:,2) - array(:,ny) = array(:,ny-1) - else - array(:,1) = c0 - array(:,ny) = c0 - endif - - if (ew_boundary_type =='cyclic') then - array(1,:) = array(nx-1,:) - array(nx,:) = array(2,:) - elseif (ew_boundary_type == 'open') then - array(1,:) = array(2,:) - array(nx,:) = array(nx-1,:) - else - array(1,:) = c0 - array(nx,:) = c0 - endif - - end subroutine halo_HTE_HTN +!======================================================================= end module ice_dyn_evp1d diff --git a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 index 04ea6c674..a41f842ce 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 @@ -2310,7 +2310,7 @@ subroutine visc_replpress(strength, DminArea, Delta, & DminArea ! 
real (kind=dbl_kind), intent(in):: & - Delta + Delta real (kind=dbl_kind), intent(out):: & zetax2 , & ! bulk viscosity diff --git a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 index 64140f273..dd59efc87 100644 --- a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 @@ -308,12 +308,12 @@ subroutine init_remap ! regions are adjusted to obtain the desired area. ! If false, edgearea is computed in locate_triangles and passed out. ! - ! l_fixed_area = .false. has been the default approach in CICE. It is - ! used like this for the B-grid. However, idealized tests with the - ! C-grid have shown that l_fixed_area = .false. leads to a checkerboard - ! pattern in prognostic fields (e.g. aice). Using l_fixed_area = .true. + ! l_fixed_area = .false. has been the default approach in CICE. It is + ! used like this for the B-grid. However, idealized tests with the + ! C-grid have shown that l_fixed_area = .false. leads to a checkerboard + ! pattern in prognostic fields (e.g. aice). Using l_fixed_area = .true. ! eliminates the checkerboard pattern in C-grid simulations. - ! + ! !------------------------------------------------------------------- if (grid_ice == 'CD' .or. grid_ice == 'C') then @@ -1726,7 +1726,7 @@ subroutine locate_triangles (nx_block, ny_block, & dpy , & ! y coordinates of departure points at cell corners dxu , & ! E-W dimension of U-cell (m) dyu , & ! N-S dimension of U-cell (m) - earea , & ! area of E-cell + earea , & ! area of E-cell narea ! area of N-cell real (kind=dbl_kind), dimension (nx_block,ny_block,0:nvert,ngroups), intent(out) :: & @@ -1763,8 +1763,8 @@ subroutine locate_triangles (nx_block, ny_block, & ishift_br, jshift_br , & ! i,j indices of BR cell relative to edge ishift_tc, jshift_tc , & ! i,j indices of TC cell relative to edge ishift_bc, jshift_bc , & ! i,j indices of BC cell relative to edge - is_l, js_l , & ! i,j shifts for TL1, BL2 for area consistency - is_r, js_r , & ! i,j shifts for TR1, BR2 for area consistency + is_l, js_l , & ! i,j shifts for TL1, BL2 for area consistency + is_r, js_r , & ! i,j shifts for TR1, BR2 for area consistency ise_tl, jse_tl , & ! i,j of TL other edge relative to edge ise_bl, jse_bl , & ! i,j of BL other edge relative to edge ise_tr, jse_tr , & ! i,j of TR other edge relative to edge @@ -1872,7 +1872,7 @@ subroutine locate_triangles (nx_block, ny_block, & areafac_c(:,:) = c0 areafac_ce(:,:) = c0 - + do ng = 1, ngroups do j = 1, ny_block do i = 1, nx_block @@ -1909,7 +1909,7 @@ subroutine locate_triangles (nx_block, ny_block, & ishift_bc = 0 jshift_bc = 0 - ! index shifts for TL1, BL2, TR1 and BR2 for area consistency + ! index shifts for TL1, BL2, TR1 and BR2 for area consistency is_l = -1 js_l = 0 @@ -1937,7 +1937,7 @@ subroutine locate_triangles (nx_block, ny_block, & enddo ! area scale factor for other edge (east) - + do j = 1, ny_block do i = 1, nx_block areafac_ce(i,j) = earea(i,j) @@ -1961,7 +1961,7 @@ subroutine locate_triangles (nx_block, ny_block, & ishift_bc = 0 jshift_bc = 0 - ! index shifts for TL1, BL2, TR1 and BR2 for area consistency + ! index shifts for TL1, BL2, TR1 and BR2 for area consistency is_l = 0 js_l = 1 @@ -2115,11 +2115,11 @@ subroutine locate_triangles (nx_block, ny_block, & !------------------------------------------------------------------- ! Locate triangles in TL cell (NW for north edge, NE for east edge) ! and BL cell (W for north edge, N for east edge). - ! + ! ! 
areafact_c or areafac_ce (areafact_c for the other edge) are used - ! (with shifted indices) to make sure that a flux area on one edge - ! is consistent with the analogous area on the other edge and to - ! ensure that areas add up when using l_fixed_area = T. See PR #849 + ! (with shifted indices) to make sure that a flux area on one edge + ! is consistent with the analogous area on the other edge and to + ! ensure that areas add up when using l_fixed_area = T. See PR #849 ! for details. ! !------------------------------------------------------------------- @@ -2477,7 +2477,7 @@ subroutine locate_triangles (nx_block, ny_block, & iflux (i,j,ng) = i + ishift_tc jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - + ! TC2a (group 5) ng = 5 @@ -2490,7 +2490,7 @@ subroutine locate_triangles (nx_block, ny_block, & iflux (i,j,ng) = i + ishift_tc jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - + ! TC3a (group 6) ng = 6 diff --git a/cicecore/cicedyn/general/ice_init.F90 b/cicecore/cicedyn/general/ice_init.F90 index 7343de4d2..051f41807 100644 --- a/cicecore/cicedyn/general/ice_init.F90 +++ b/cicecore/cicedyn/general/ice_init.F90 @@ -105,7 +105,7 @@ subroutine input_data grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & dxrect, dyrect, dxscale, dyscale, scale_dxdy, & - lonrefrect, latrefrect, pgl_global_ext + lonrefrect, latrefrect, save_ghte_ghtn use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & evp_algorithm, visc_method, & seabed_stress, seabed_stress_method, & @@ -375,7 +375,7 @@ subroutine input_data ndte = 120 ! subcycles per dynamics timestep: ndte=dt_dyn/dte evp_algorithm = 'standard_2d' ! EVP kernel (standard_2d=standard cice evp; shared_mem_1d=1d shared memory and no mpi elasticDamp = 0.36_dbl_kind ! coefficient for calculating the parameter E - pgl_global_ext = .false. ! if true, init primary grid lengths (global ext.) + save_ghte_ghtn = .false. ! if true, save global hte and htn (global ext.) brlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared arlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared revised_evp = .false. ! if true, use revised procedure for evp dynamics @@ -874,10 +874,6 @@ subroutine input_data #else if (trim(diag_type) == 'file') call get_fileunit(nu_diag) #endif - ! If 1d evp dynamics then set variable in order to avoid circular dependencies with dyn_shared - if ((kdyn == 1) .and. (evp_algorithm == 'shared_mem_1d')) then - pgl_global_ext = .true. - endif !----------------------------------------------------------------- ! broadcast namelist settings @@ -966,7 +962,6 @@ subroutine input_data call broadcast_scalar(ndte, master_task) call broadcast_scalar(evp_algorithm, master_task) call broadcast_scalar(elasticDamp, master_task) - call broadcast_scalar(pgl_global_ext, master_task) call broadcast_scalar(brlx, master_task) call broadcast_scalar(arlx, master_task) call broadcast_scalar(revised_evp, master_task) @@ -1260,6 +1255,10 @@ subroutine input_data abort_list = trim(abort_list)//":5" endif + if (kdyn == 1 .and. evp_algorithm == 'shared_mem_1d') then + save_ghte_ghtn = .true. + endif + if (kdyn == 2 .and. 
revised_evp) then if (my_task == master_task) then write(nu_diag,*) subname//' WARNING: revised_evp = T with EAP dynamics' diff --git a/cicecore/cicedyn/general/ice_step_mod.F90 b/cicecore/cicedyn/general/ice_step_mod.F90 index 8ea6aa90e..b738e670b 100644 --- a/cicecore/cicedyn/general/ice_step_mod.F90 +++ b/cicecore/cicedyn/general/ice_step_mod.F90 @@ -757,7 +757,7 @@ subroutine update_state (dt, daidt, dvidt, dagedt, offset) use ice_state, only: aicen, trcrn, vicen, vsnon, & aice, trcr, vice, vsno, aice0, trcr_depend, & bound_state, trcr_base, nt_strata, n_trcr_strata - use ice_flux, only: Tf + use ice_flux, only: Tf use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound, timer_updstate real (kind=dbl_kind), intent(in) :: & diff --git a/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 index c36ee424e..a33e050b9 100644 --- a/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 +++ b/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 @@ -7166,6 +7166,8 @@ subroutine ice_HaloDestroy(halo) end subroutine ice_HaloDestroy +!*********************************************************************** + end module ice_boundary !||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 index d23d48450..b9ac8fe33 100644 --- a/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 +++ b/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 @@ -4911,6 +4911,8 @@ subroutine ice_HaloDestroy(halo) end subroutine ice_HaloDestroy +!*********************************************************************** + end module ice_boundary !||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index 440403522..5f1b37ff1 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -25,11 +25,16 @@ module ice_grid use ice_kinds_mod use ice_broadcast, only: broadcast_scalar, broadcast_array use ice_boundary, only: ice_HaloUpdate, ice_HaloExtrapolate + use ice_constants, only: c0, c1, c1p5, c2, c4, c20, c360, & + p5, p25, radius, cm_to_m, m_to_cm, & + field_loc_center, field_loc_NEcorner, field_loc_Nface, field_loc_Eface, & + field_type_scalar, field_type_vector, field_type_angle use ice_communicate, only: my_task, master_task use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_domain_size, only: nx_global, ny_global, max_blocks use ice_domain, only: blocks_ice, nblocks, halo_info, distrb_info, & - ew_boundary_type, ns_boundary_type, init_domain_distribution + ew_boundary_type, ns_boundary_type, init_domain_distribution, & + close_boundaries use ice_fileunits, only: nu_diag, nu_grid, nu_kmt, & get_fileunit, release_fileunit, flush_fileunit use ice_gather_scatter, only: gather_global, scatter_global @@ -43,8 +48,9 @@ module ice_grid implicit none private - public :: init_grid1, init_grid2, grid_average_X2Y, & - alloc_grid, makemask, grid_neighbor_min, grid_neighbor_max + public :: init_grid1, init_grid2, grid_average_X2Y, makemask, & + alloc_grid, dealloc_grid, & + grid_neighbor_min, grid_neighbor_max character (len=char_len_long), public :: & grid_format , & ! 
file format ('bin'=binary or 'nc'=netcdf) @@ -179,7 +185,7 @@ module ice_grid logical (kind=log_kind), public :: & use_bathymetry, & ! flag for reading in bathymetry_file - pgl_global_ext, & ! flag for init primary grid lengths (global ext.) + save_ghte_ghtn, & ! flag for saving global hte and htn during initialization scale_dxdy ! flag to apply scale factor to vary dx/dy in rectgrid logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & @@ -287,7 +293,7 @@ subroutine alloc_grid mse (2,2,nx_block,ny_block,max_blocks), & msw (2,2,nx_block,ny_block,max_blocks), & stat=ierr) - if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') + if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory1') if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate( & @@ -296,23 +302,46 @@ subroutine alloc_grid ratiodxNr(nx_block,ny_block,max_blocks), & ratiodyEr(nx_block,ny_block,max_blocks), & stat=ierr) - if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') + if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory2') endif - if (pgl_global_ext) then + if (save_ghte_ghtn) then if (my_task == master_task) then allocate( & G_HTE(nx_global+2*nghost, ny_global+2*nghost), & ! length of eastern edge of T-cell (global ext.) G_HTN(nx_global+2*nghost, ny_global+2*nghost), & ! length of northern edge of T-cell (global ext.) stat=ierr) - if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') + else + allocate( & + G_HTE(1,1), & ! needed for debug checks + G_HTN(1,1), & ! never used in code + stat=ierr) endif + if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory3') endif end subroutine alloc_grid !======================================================================= +! +! DeAllocate space for variables no longer needed after initialization +! + subroutine dealloc_grid + + integer (int_kind) :: ierr + + character(len=*), parameter :: subname = '(dealloc_grid)' + + if (save_ghte_ghtn) then + deallocate(G_HTE, G_HTN, stat=ierr) + if (ierr/=0) call abort_ice(subname//'ERROR: Dealloc error1') + endif + + end subroutine dealloc_grid + +!======================================================================= + ! Distribute blocks across processors. The distribution is optimized ! based on latitude and topography, contained in the ULAT and KMT arrays. ! @@ -320,10 +349,6 @@ end subroutine alloc_grid subroutine init_grid1 - use ice_blocks, only: nx_block, ny_block - use ice_broadcast, only: broadcast_array - use ice_constants, only: c1 - integer (kind=int_kind) :: & fid_grid, & ! file id for netCDF grid file fid_kmt ! file id for netCDF kmt file @@ -446,11 +471,6 @@ end subroutine init_grid1 subroutine init_grid2 - use ice_blocks, only: get_block, block, nx_block, ny_block - use ice_constants, only: c0, c1, c2, p5, p25, c1p5, & - field_loc_center, field_loc_NEcorner, field_loc_Nface, field_loc_Eface, & - field_type_scalar, field_type_vector, field_type_angle - use ice_domain_size, only: max_blocks #if defined (_OPENMP) use OMP_LIB #endif @@ -798,12 +818,6 @@ end subroutine init_grid2 subroutine popgrid - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c1, p5, & - field_loc_center, field_loc_NEcorner, & - field_type_scalar, field_type_angle - use ice_domain_size, only: max_blocks - integer (kind=int_kind) :: & i, j, iblk, & ilo,ihi,jlo,jhi ! 
beginning and end of physical domain @@ -917,11 +931,6 @@ end subroutine popgrid subroutine popgrid_nc - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c1, & - field_loc_center, field_loc_NEcorner, & - field_type_scalar, field_type_angle - use ice_domain_size, only: max_blocks #ifdef USE_NETCDF use netcdf #endif @@ -1088,11 +1097,7 @@ end subroutine popgrid_nc subroutine latlongrid -! use ice_boundary - use ice_domain_size use ice_scam, only : scmlat, scmlon, single_column - use ice_constants, only: c0, c1, p5, p25, & - field_loc_center, field_type_scalar, radius #ifdef USE_NETCDF use netcdf #endif @@ -1372,10 +1377,6 @@ end subroutine latlongrid subroutine rectgrid - use ice_constants, only: c0, c1, c2, radius, cm_to_m, & - field_loc_center, field_loc_NEcorner, field_type_scalar - use ice_domain, only: close_boundaries - integer (kind=int_kind) :: & i, j, & imid, jmid @@ -1571,8 +1572,6 @@ subroutine rectgrid_scale_dxdy ! generate a variable spaced rectangluar grid. ! extend spacing from center of grid outward. - use ice_constants, only: c0, c1, c2, radius, cm_to_m, & - field_loc_center, field_loc_NEcorner, field_type_scalar integer (kind=int_kind) :: & i, j, iblk, & @@ -1736,8 +1735,6 @@ end subroutine rectgrid_scale_dxdy subroutine grid_boxislands_kmt (work) - use ice_constants, only: c0, c1, c20 - real (kind=dbl_kind), dimension(:,:), intent(inout) :: work integer (kind=int_kind) :: & @@ -1871,11 +1868,6 @@ end subroutine grid_boxislands_kmt subroutine cpomgrid - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c1, m_to_cm, & - field_loc_NEcorner, field_type_scalar - use ice_domain_size, only: max_blocks - integer (kind=int_kind) :: & i, j, iblk, & ilo,ihi,jlo,jhi ! beginning and end of physical domain @@ -1977,10 +1969,6 @@ end subroutine cpomgrid subroutine primary_grid_lengths_HTN(work_g) - use ice_constants, only: p25, p5, c2, cm_to_m, & - field_loc_center, field_loc_NEcorner, & - field_loc_Nface, field_type_scalar - real (kind=dbl_kind), dimension(:,:) :: work_g ! global array holding HTN ! local variables @@ -2016,12 +2004,13 @@ subroutine primary_grid_lengths_HTN(work_g) work_g2(i,j) = p5*(work_g(i,j) + work_g(ip1,j)) ! dxU enddo enddo - if (pgl_global_ext) then + if (save_ghte_ghtn) then do j = 1, ny_global - do i = 1,nx_global - G_HTN(i+nghost,j+nghost) = work_g(i,j) - enddo + do i = 1,nx_global + G_HTN(i+nghost,j+nghost) = work_g(i,j) enddo + enddo + call global_ext_halo(G_HTN) endif endif call scatter_global(HTN, work_g, master_task, distrb_info, & @@ -2085,10 +2074,6 @@ end subroutine primary_grid_lengths_HTN subroutine primary_grid_lengths_HTE(work_g) - use ice_constants, only: p25, p5, c2, cm_to_m, & - field_loc_center, field_loc_NEcorner, & - field_loc_Eface, field_type_scalar - real (kind=dbl_kind), dimension(:,:) :: work_g ! global array holding HTE ! local variables @@ -2127,12 +2112,13 @@ subroutine primary_grid_lengths_HTE(work_g) work_g2(i,ny_global) = c2*work_g(i,ny_global-1) - work_g(i,ny_global-2) ! dyU enddo endif - if (pgl_global_ext) then + if (save_ghte_ghtn) then do j = 1, ny_global - do i = 1, nx_global - G_HTE(i+nghost,j+nghost) = work_g(i,j) - enddo + do i = 1, nx_global + G_HTE(i+nghost,j+nghost) = work_g(i,j) + enddo enddo + call global_ext_halo(G_HTE) endif endif call scatter_global(HTE, work_g, master_task, distrb_info, & @@ -2190,6 +2176,48 @@ end subroutine primary_grid_lengths_HTE !======================================================================= +! 
This subroutine fills ghost cells in global extended grid + + subroutine global_ext_halo(array) + + real (kind=dbl_kind), dimension(:,:), intent(inout) :: & + array ! extended global grid size nx+2*nghost, ny+2*nghost + ! nghost+1:nghost+nx_global and nghost+1:nghost+ny_global filled on entry + + integer (kind=int_kind) :: n + + character(len=*), parameter :: subname = '(global_ext_halo)' + + do n = 1,nghost + if (ns_boundary_type =='cyclic') then + array(:,n) = array(:,ny_global+n) + array(:,ny_global+nghost+n) = array(:,nghost+n) + elseif (ns_boundary_type == 'open') then + array(:,n) = array(:,nghost+1) + array(:,ny_global+nghost+n) = array(:,ny_global+nghost) + else + array(:,n) = c0 + array(:,ny_global+nghost+n) = c0 + endif + enddo + + do n = 1,nghost + if (ew_boundary_type =='cyclic') then + array(n ,:) = array(nx_global+n,:) + array(nx_global+nghost+n,:) = array(nghost+n ,:) + elseif (ew_boundary_type == 'open') then + array(n ,:) = array(nghost+1 ,:) + array(nx_global+nghost+n,:) = array(nx_global+nghost,:) + else + array(n ,:) = c0 + array(nx_global+nghost+n,:) = c0 + endif + enddo + + end subroutine global_ext_halo + +!======================================================================= + ! Sets the boundary values for the T cell land mask (hm) and ! makes the logical land masks for T and U cells (tmask, umask) ! and N and E cells (nmask, emask). @@ -2199,10 +2227,6 @@ end subroutine primary_grid_lengths_HTE subroutine makemask - use ice_constants, only: c0, p5, c1p5, & - field_loc_center, field_loc_NEcorner, field_type_scalar, & - field_loc_Nface, field_loc_Eface - integer (kind=int_kind) :: & i, j, iblk, & ilo,ihi,jlo,jhi ! beginning and end of physical domain @@ -2353,10 +2377,6 @@ end subroutine makemask subroutine Tlatlon - use ice_constants, only: c0, c1, c1p5, c2, c4, p5, & - field_loc_center, field_loc_Nface, field_loc_Eface, & - field_type_scalar - integer (kind=int_kind) :: & i, j, iblk , & ! horizontal indices ilo,ihi,jlo,jhi ! beginning and end of physical domain @@ -2975,8 +2995,6 @@ end subroutine grid_average_X2Y_1f subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2) - use ice_constants, only: c0 - character(len=*) , intent(in) :: & dir @@ -3206,8 +3224,6 @@ end subroutine grid_average_X2YS subroutine grid_average_X2YA(dir,work1,wght1,work2) - use ice_constants, only: c0 - character(len=*) , intent(in) :: & dir @@ -3436,8 +3452,6 @@ end subroutine grid_average_X2YA subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) - use ice_constants, only: c0, p25, p5 - character(len=*) , intent(in) :: & dir @@ -3640,8 +3654,6 @@ end subroutine grid_average_X2YF subroutine grid_average_X2Y_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) - use ice_constants, only: c0 - character(len=*) , intent(in) :: & dir @@ -3852,11 +3864,6 @@ end function grid_neighbor_max subroutine gridbox_corners - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c2, c360, & - field_loc_NEcorner, field_type_scalar - use ice_domain_size, only: max_blocks - integer (kind=int_kind) :: & i,j,iblk,icorner,& ! index counters ilo,ihi,jlo,jhi ! beginning and end of physical domain @@ -4048,11 +4055,6 @@ end subroutine gridbox_corners subroutine gridbox_edges - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c2, c360, & - field_loc_NEcorner, field_type_scalar - use ice_domain_size, only: max_blocks - integer (kind=int_kind) :: & i,j,iblk,icorner,& ! index counters ilo,ihi,jlo,jhi ! 
beginning and end of physical domain @@ -4348,11 +4350,6 @@ end subroutine gridbox_edges subroutine gridbox_verts(work_g,vbounds) - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c2, & - field_loc_NEcorner, field_type_scalar - use ice_domain_size, only: max_blocks - real (kind=dbl_kind), dimension(:,:), intent(in) :: & work_g @@ -4467,8 +4464,6 @@ end subroutine gridbox_verts subroutine get_bathymetry - use ice_constants, only: c0 - integer (kind=int_kind) :: & i, j, k, iblk ! loop indices @@ -4660,7 +4655,6 @@ subroutine read_seabedstress_bathy ! use module use ice_read_write - use ice_constants, only: field_loc_center, field_type_scalar ! local variables integer (kind=int_kind) :: & diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index 4efb13c52..3f87f2ca8 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -80,7 +80,7 @@ subroutine cice_init get_forcing_atmo, get_forcing_ocn, alloc_forcing, get_wave_spec use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & faero_data, faero_default, alloc_forcing_bgc - use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_grid, only: init_grid1, init_grid2, alloc_grid, dealloc_grid use ice_history, only: init_hist, accum_hist use ice_restart_shared, only: restart, runid, runtype use ice_init, only: input_data, init_state @@ -213,6 +213,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler + call dealloc_grid ! deallocate temporary grid arrays if (write_ic) call accum_hist(dt) ! write initial conditions end subroutine cice_init diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index 69ecd4c91..7e2308f20 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -80,7 +80,7 @@ subroutine cice_init get_forcing_atmo, get_forcing_ocn, alloc_forcing, get_wave_spec use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & faero_data, faero_default, alloc_forcing_bgc - use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_grid, only: init_grid1, init_grid2, alloc_grid, dealloc_grid use ice_history, only: init_hist, accum_hist use ice_restart_shared, only: restart, runid, runtype use ice_init, only: input_data, init_state @@ -215,6 +215,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler + call dealloc_grid ! 
deallocate temporary grid arrays end subroutine cice_init !======================================================================= diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index 3c5907c54..419dbacc9 100644 --- a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -82,7 +82,7 @@ subroutine cice_init(mpicom_ice) get_forcing_atmo, get_forcing_ocn, get_wave_spec, init_snowtable use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & faero_default, alloc_forcing_bgc, fiso_default - use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_grid, only: init_grid1, init_grid2, alloc_grid, dealloc_grid use ice_history, only: init_hist, accum_hist use ice_restart_shared, only: restart, runtype use ice_init, only: input_data, init_state @@ -241,6 +241,7 @@ subroutine cice_init(mpicom_ice) if (write_ic) call accum_hist(dt) ! write initial conditions + call dealloc_grid ! deallocate temporary grid arrays if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') endif diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 index 2ebcc696a..0c6bc9949 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 @@ -32,7 +32,7 @@ subroutine cice_init1() use ice_init , only: input_data use ice_init_column , only: input_zbgc, count_tracers - use ice_grid , only: init_grid1, alloc_grid + use ice_grid , only: init_grid1, alloc_grid, dealloc_grid use ice_domain , only: init_domain_blocks use ice_arrays_column , only: alloc_arrays_column use ice_state , only: alloc_state @@ -201,6 +201,8 @@ subroutine cice_init2() call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler + call dealloc_grid ! deallocate temporary grid arrays + end subroutine cice_init2 !======================================================================= diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 index cb85f86bd..4577113f1 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 @@ -87,7 +87,7 @@ subroutine cice_init(mpi_comm) get_forcing_atmo, get_forcing_ocn, get_wave_spec, init_snowtable use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & faero_default, alloc_forcing_bgc, fiso_default - use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_grid, only: init_grid1, init_grid2, alloc_grid, dealloc_grid use ice_history, only: init_hist, accum_hist use ice_restart_shared, only: restart, runtype use ice_init, only: input_data, init_state @@ -259,6 +259,7 @@ subroutine cice_init(mpi_comm) if (write_ic) call accum_hist(dt) ! write initial conditions + call dealloc_grid ! 
deallocate temporary grid arrays if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') endif diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index 38000446a..a48bdda30 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -82,7 +82,7 @@ subroutine cice_init get_forcing_atmo, get_forcing_ocn, get_wave_spec, init_snowtable use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & faero_default, alloc_forcing_bgc, fiso_default - use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_grid, only: init_grid1, init_grid2, alloc_grid, dealloc_grid use ice_history, only: init_hist, accum_hist use ice_restart_shared, only: restart, runtype use ice_init, only: input_data, init_state @@ -243,6 +243,8 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler + call dealloc_grid ! deallocate temporary grid arrays + if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') endif diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 index 38000446a..cb1241a5e 100644 --- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -82,7 +82,7 @@ subroutine cice_init get_forcing_atmo, get_forcing_ocn, get_wave_spec, init_snowtable use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & faero_default, alloc_forcing_bgc, fiso_default - use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_grid, only: init_grid1, init_grid2, alloc_grid, dealloc_grid use ice_history, only: init_hist, accum_hist use ice_restart_shared, only: restart, runtype use ice_init, only: input_data, init_state @@ -243,6 +243,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler + call dealloc_grid ! deallocate temporary grid arrays if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') endif diff --git a/cicecore/drivers/unittest/halochk/CICE_InitMod.F90 b/cicecore/drivers/unittest/halochk/CICE_InitMod.F90 index 38000446a..cb1241a5e 100644 --- a/cicecore/drivers/unittest/halochk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/halochk/CICE_InitMod.F90 @@ -82,7 +82,7 @@ subroutine cice_init get_forcing_atmo, get_forcing_ocn, get_wave_spec, init_snowtable use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & faero_default, alloc_forcing_bgc, fiso_default - use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_grid, only: init_grid1, init_grid2, alloc_grid, dealloc_grid use ice_history, only: init_hist, accum_hist use ice_restart_shared, only: restart, runtype use ice_init, only: input_data, init_state @@ -243,6 +243,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler + call dealloc_grid ! 
deallocate temporary grid arrays if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') endif diff --git a/cicecore/drivers/unittest/opticep/CICE_InitMod.F90 b/cicecore/drivers/unittest/opticep/CICE_InitMod.F90 index 38000446a..cb1241a5e 100644 --- a/cicecore/drivers/unittest/opticep/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/opticep/CICE_InitMod.F90 @@ -82,7 +82,7 @@ subroutine cice_init get_forcing_atmo, get_forcing_ocn, get_wave_spec, init_snowtable use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & faero_default, alloc_forcing_bgc, fiso_default - use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_grid, only: init_grid1, init_grid2, alloc_grid, dealloc_grid use ice_history, only: init_hist, accum_hist use ice_restart_shared, only: restart, runtype use ice_init, only: input_data, init_state @@ -243,6 +243,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler + call dealloc_grid ! deallocate temporary grid arrays if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') endif diff --git a/cicecore/drivers/unittest/opticep/ice_step_mod.F90 b/cicecore/drivers/unittest/opticep/ice_step_mod.F90 index ba19436bd..5b85cb7bf 100644 --- a/cicecore/drivers/unittest/opticep/ice_step_mod.F90 +++ b/cicecore/drivers/unittest/opticep/ice_step_mod.F90 @@ -760,7 +760,7 @@ subroutine update_state (dt, daidt, dvidt, dagedt, offset) use ice_state, only: aicen, trcrn, vicen, vsnon, & aice, trcr, vice, vsno, aice0, trcr_depend, & bound_state, trcr_base, nt_strata, n_trcr_strata - use ice_flux, only: Tf + use ice_flux, only: Tf use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound, timer_updstate real (kind=dbl_kind), intent(in) :: & diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 index 38000446a..cb1241a5e 100644 --- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -82,7 +82,7 @@ subroutine cice_init get_forcing_atmo, get_forcing_ocn, get_wave_spec, init_snowtable use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & faero_default, alloc_forcing_bgc, fiso_default - use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_grid, only: init_grid1, init_grid2, alloc_grid, dealloc_grid use ice_history, only: init_hist, accum_hist use ice_restart_shared, only: restart, runtype use ice_init, only: input_data, init_state @@ -243,6 +243,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler + call dealloc_grid ! deallocate temporary grid arrays if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') endif
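
Note (not part of the patch): the new global_ext_halo routine introduced above fills the ghost rows and columns of the extended global G_HTN/G_HTE arrays according to the configured boundary types. The minimal standalone sketch below reproduces only the 'cyclic' branch of that fill so the wrap-around indexing is easy to verify by eye; the program name, the small grid size, and nghost = 1 are illustrative assumptions and do not appear in the PR.

! Illustration only: cyclic ghost-cell fill on a small extended global array,
! mirroring the index arithmetic of the global_ext_halo 'cyclic' branches.
program ext_halo_demo
  implicit none
  integer, parameter :: dbl_kind  = selected_real_kind(13)
  integer, parameter :: nx_global = 4, ny_global = 3, nghost = 1   ! assumed sizes
  real (kind=dbl_kind) :: array(nx_global+2*nghost, ny_global+2*nghost)
  integer :: i, j, n

  ! fill the interior (nghost+1:nghost+nx_global, nghost+1:nghost+ny_global)
  ! with recognizable values; ghost cells start at a sentinel value
  array = -1.0_dbl_kind
  do j = 1, ny_global
     do i = 1, nx_global
        array(i+nghost, j+nghost) = real(10*j + i, dbl_kind)
     enddo
  enddo

  ! north-south cyclic fill, as in the ns_boundary_type == 'cyclic' branch
  do n = 1, nghost
     array(:, n)                  = array(:, ny_global+n)
     array(:, ny_global+nghost+n) = array(:, nghost+n)
  enddo

  ! east-west cyclic fill, as in the ew_boundary_type == 'cyclic' branch
  do n = 1, nghost
     array(n, :)                  = array(nx_global+n, :)
     array(nx_global+nghost+n, :) = array(nghost+n, :)
  enddo

  ! print rows from north to south so the wrap-around is visible
  do j = ny_global+2*nghost, 1, -1
     write(*,'(6f6.1)') (array(i,j), i = 1, nx_global+2*nghost)
  enddo
end program ext_halo_demo

Running this prints the southernmost interior row duplicated in the northern ghost row (and vice versa), and likewise for the east-west columns, which is the behaviour the 'open' and default branches replace with edge replication and zeros, respectively.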